diff --git a/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..268c00e0364506fa384d9b019001de22beee5332 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Tsuyoshi Hombashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..4f7b94510223515d0c0fb8f7c7feaae4cff7509f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/METADATA @@ -0,0 +1,276 @@ +Metadata-Version: 2.1 +Name: DataProperty +Version: 1.0.1 +Summary: Python library for extract property from data. 
+Home-page: https://github.com/thombashi/DataProperty +Author: Tsuyoshi Hombashi +Author-email: tsuyoshi.hombashi@gmail.com +Maintainer: Tsuyoshi Hombashi +Maintainer-email: tsuyoshi.hombashi@gmail.com +License: MIT License +Project-URL: Source, https://github.com/thombashi/DataProperty +Project-URL: Tracker, https://github.com/thombashi/DataProperty/issues +Keywords: data,library,property +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: mbstrdecoder (<2,>=1.0.0) +Requires-Dist: typepy[datetime] (<2,>=1.2.0) +Provides-Extra: logging +Requires-Dist: loguru (<1,>=0.4.1) ; extra == 'logging' +Provides-Extra: test +Requires-Dist: pytest (>=6.0.1) ; extra == 'test' +Requires-Dist: pytest-md-report (>=0.3) ; extra == 'test' +Requires-Dist: tcolorpy (>=0.1.2) ; extra == 'test' + +.. contents:: **DataProperty** + :backlinks: top + :local: + + +Summary +======= +A Python library for extract property from data. + + +.. image:: https://badge.fury.io/py/DataProperty.svg + :target: https://badge.fury.io/py/DataProperty + :alt: PyPI package version + +.. image:: https://anaconda.org/conda-forge/DataProperty/badges/version.svg + :target: https://anaconda.org/conda-forge/DataProperty + :alt: conda-forge package version + +.. image:: https://img.shields.io/pypi/pyversions/DataProperty.svg + :target: https://pypi.org/project/DataProperty + :alt: Supported Python versions + +.. image:: https://img.shields.io/pypi/implementation/DataProperty.svg + :target: https://pypi.org/project/DataProperty + :alt: Supported Python implementations + +.. image:: https://github.com/thombashi/DataProperty/actions/workflows/ci.yml/badge.svg + :target: https://github.com/thombashi/DataProperty/actions/workflows/ci.yml + :alt: CI status of Linux/macOS/Windows + +.. image:: https://coveralls.io/repos/github/thombashi/DataProperty/badge.svg?branch=master + :target: https://coveralls.io/github/thombashi/DataProperty?branch=master + :alt: Test coverage + +.. image:: https://github.com/thombashi/DataProperty/actions/workflows/github-code-scanning/codeql/badge.svg + :target: https://github.com/thombashi/DataProperty/actions/workflows/github-code-scanning/codeql + :alt: CodeQL + + +Installation +============ + +Installation: pip +------------------------------ +:: + + pip install DataProperty + +Installation: conda +------------------------------ +:: + + conda install -c conda-forge dataproperty + +Installation: apt +------------------------------ +:: + + sudo add-apt-repository ppa:thombashi/ppa + sudo apt update + sudo apt install python3-dataproperty + + +Usage +===== + +Extract property of data +------------------------ + +e.g. 
Extract a ``float`` value property +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. code:: python + + >>> from dataproperty import DataProperty + >>> DataProperty(-1.1) + data=-1.1, type=REAL_NUMBER, align=right, ascii_width=4, int_digits=1, decimal_places=1, extra_len=1 + +e.g. Extract a ``int`` value property +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. code:: python + + >>> from dataproperty import DataProperty + >>> DataProperty(123456789) + data=123456789, type=INTEGER, align=right, ascii_width=9, int_digits=9, decimal_places=0, extra_len=0 + +e.g. Extract a ``str`` (ascii) value property +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. code:: python + + >>> from dataproperty import DataProperty + >>> DataProperty("sample string") + data=sample string, type=STRING, align=left, length=13, ascii_width=13, extra_len=0 + +e.g. Extract a ``str`` (multi-byte) value property +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. code:: python + + >>> from dataproperty import DataProperty + >>> str(DataProperty("吾輩は猫である")) + data=吾輩は猫である, type=STRING, align=left, length=7, ascii_width=14, extra_len=0 + +e.g. Extract a time (``datetime``) value property +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. code:: python + + >>> import datetime + >>> from dataproperty import DataProperty + >>> DataProperty(datetime.datetime(2017, 1, 1, 0, 0, 0)) + data=2017-01-01 00:00:00, type=DATETIME, align=left, ascii_width=19, extra_len=0 + +e.g. Extract a ``bool`` value property +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.. code:: python + + >>> from dataproperty import DataProperty + >>> DataProperty(True) + data=True, type=BOOL, align=left, ascii_width=4, extra_len=0 + + +Extract data property for each element from a matrix +---------------------------------------------------- +``DataPropertyExtractor.to_dp_matrix`` method returns a matrix of ``DataProperty`` instances from a data matrix. +An example data set and the result are as follows: + +:Sample Code: + .. 
code:: python + + import datetime + from dataproperty import DataPropertyExtractor + + dp_extractor = DataPropertyExtractor() + dt = datetime.datetime(2017, 1, 1, 0, 0, 0) + inf = float("inf") + nan = float("nan") + + dp_matrix = dp_extractor.to_dp_matrix([ + [1, 1.1, "aa", 1, 1, True, inf, nan, dt], + [2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", dt], + [3, 3.33, "cccc", -3, "ccc", "true", inf, "NAN", "2017-01-01T01:23:45+0900"], + ]) + + for row, dp_list in enumerate(dp_matrix): + for col, dp in enumerate(dp_list): + print("row={:d}, col={:d}, {}".format(row, col, str(dp))) + +:Output: + :: + + row=0, col=0, data=1, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0 + row=0, col=1, data=1.1, type=REAL_NUMBER, align=right, ascii_width=3, int_digits=1, decimal_places=1, extra_len=0 + row=0, col=2, data=aa, type=STRING, align=left, ascii_width=2, length=2, extra_len=0 + row=0, col=3, data=1, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0 + row=0, col=4, data=1, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0 + row=0, col=5, data=True, type=BOOL, align=left, ascii_width=4, extra_len=0 + row=0, col=6, data=Infinity, type=INFINITY, align=left, ascii_width=8, extra_len=0 + row=0, col=7, data=NaN, type=NAN, align=left, ascii_width=3, extra_len=0 + row=0, col=8, data=2017-01-01 00:00:00, type=DATETIME, align=left, ascii_width=19, extra_len=0 + row=1, col=0, data=2, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0 + row=1, col=1, data=2.2, type=REAL_NUMBER, align=right, ascii_width=3, int_digits=1, decimal_places=1, extra_len=0 + row=1, col=2, data=bbb, type=STRING, align=left, ascii_width=3, length=3, extra_len=0 + row=1, col=3, data=2.2, type=REAL_NUMBER, align=right, ascii_width=3, int_digits=1, decimal_places=1, extra_len=0 + row=1, col=4, data=2.2, type=REAL_NUMBER, align=right, ascii_width=3, int_digits=1, decimal_places=1, extra_len=0 + row=1, col=5, data=False, type=BOOL, align=left, ascii_width=5, extra_len=0 + row=1, col=6, data=Infinity, type=INFINITY, align=left, ascii_width=8, extra_len=0 + row=1, col=7, data=NaN, type=NAN, align=left, ascii_width=3, extra_len=0 + row=1, col=8, data=2017-01-01 00:00:00, type=DATETIME, align=left, ascii_width=19, extra_len=0 + row=2, col=0, data=3, type=INTEGER, align=right, ascii_width=1, int_digits=1, decimal_places=0, extra_len=0 + row=2, col=1, data=3.33, type=REAL_NUMBER, align=right, ascii_width=4, int_digits=1, decimal_places=2, extra_len=0 + row=2, col=2, data=cccc, type=STRING, align=left, ascii_width=4, length=4, extra_len=0 + row=2, col=3, data=-3, type=INTEGER, align=right, ascii_width=2, int_digits=1, decimal_places=0, extra_len=1 + row=2, col=4, data=ccc, type=STRING, align=left, ascii_width=3, length=3, extra_len=0 + row=2, col=5, data=True, type=BOOL, align=left, ascii_width=4, extra_len=0 + row=2, col=6, data=Infinity, type=INFINITY, align=left, ascii_width=8, extra_len=0 + row=2, col=7, data=NaN, type=NAN, align=left, ascii_width=3, extra_len=0 + row=2, col=8, data=2017-01-01T01:23:45+0900, type=STRING, align=left, ascii_width=24, length=24, extra_len=0 + + +Full example source code can be found at *examples/py/to_dp_matrix.py* + + +Extract properties for each column from a matrix +------------------------------------------------------ +``DataPropertyExtractor.to_column_dp_list`` method returns a list of ``DataProperty`` instances from a data matrix. 
The list represents the properties for each column. +An example data set and the result are as follows: + +:Sample Code: + .. code:: python + + import datetime + from dataproperty import DataPropertyExtractor + + dp_extractor = DataPropertyExtractor() + dt = datetime.datetime(2017, 1, 1, 0, 0, 0) + inf = float("inf") + nan = float("nan") + + data_matrix = [ + [1, 1.1, "aa", 1, 1, True, inf, nan, dt], + [2, 2.2, "bbb", 2.2, 2.2, False, "inf", "nan", dt], + [3, 3.33, "cccc", -3, "ccc", "true", inf, "NAN", "2017-01-01T01:23:45+0900"], + ] + + dp_extractor.headers = ["int", "float", "str", "num", "mix", "bool", "inf", "nan", "time"] + col_dp_list = dp_extractor.to_column_dp_list(dp_extractor.to_dp_matrix(data_matrix)) + + for col_idx, col_dp in enumerate(col_dp_list): + print(str(col_dp)) + +:Output: + :: + + column=0, type=INTEGER, align=right, ascii_width=3, bit_len=2, int_digits=1, decimal_places=0 + column=1, type=REAL_NUMBER, align=right, ascii_width=5, int_digits=1, decimal_places=(min=1, max=2) + column=2, type=STRING, align=left, ascii_width=4 + column=3, type=REAL_NUMBER, align=right, ascii_width=4, int_digits=1, decimal_places=(min=0, max=1), extra_len=(min=0, max=1) + column=4, type=STRING, align=left, ascii_width=3, int_digits=1, decimal_places=(min=0, max=1) + column=5, type=BOOL, align=left, ascii_width=5 + column=6, type=INFINITY, align=left, ascii_width=8 + column=7, type=NAN, align=left, ascii_width=3 + column=8, type=STRING, align=left, ascii_width=24 + + +Full example source code can be found at *examples/py/to_column_dp_list.py* + + +Dependencies +============ +- Python 3.7+ +- `Python package dependencies (automatically installed) `__ + +Optional dependencies +--------------------- +- `loguru `__ + - Used for logging if the package installed diff --git a/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..0aa54cbbd309ba69ebffa0b355b9ac20e98a0a25 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/RECORD @@ -0,0 +1,47 @@ +DataProperty-1.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +DataProperty-1.0.1.dist-info/LICENSE,sha256=qT11vLB3TimQEGOAytrW3LLeGTxV1DX_xWujRaCLHcI,1084 +DataProperty-1.0.1.dist-info/METADATA,sha256=BxNvMErHIPajm-sKqeSWNuN7mZwJU7L-m87uzOUQpb4,11519 +DataProperty-1.0.1.dist-info/RECORD,, +DataProperty-1.0.1.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 +DataProperty-1.0.1.dist-info/top_level.txt,sha256=RiW0aJCSmIPslrGSqg9wyPRas0Rl7Kcdi_fBBEd0-LY,13 +dataproperty/__init__.py,sha256=y_LoBUs28gC7b7AXv49X1XCPHckXo3oKECpW-Oj6LbM,1308 +dataproperty/__pycache__/__init__.cpython-310.pyc,, +dataproperty/__pycache__/__version__.cpython-310.pyc,, +dataproperty/__pycache__/_align.cpython-310.pyc,, +dataproperty/__pycache__/_align_getter.cpython-310.pyc,, +dataproperty/__pycache__/_base.cpython-310.pyc,, +dataproperty/__pycache__/_column.cpython-310.pyc,, +dataproperty/__pycache__/_common.cpython-310.pyc,, +dataproperty/__pycache__/_container.cpython-310.pyc,, +dataproperty/__pycache__/_converter.cpython-310.pyc,, +dataproperty/__pycache__/_dataproperty.cpython-310.pyc,, +dataproperty/__pycache__/_extractor.cpython-310.pyc,, +dataproperty/__pycache__/_formatter.cpython-310.pyc,, +dataproperty/__pycache__/_function.cpython-310.pyc,,
+dataproperty/__pycache__/_interface.cpython-310.pyc,, +dataproperty/__pycache__/_line_break.cpython-310.pyc,, +dataproperty/__pycache__/_preprocessor.cpython-310.pyc,, +dataproperty/__pycache__/typing.cpython-310.pyc,, +dataproperty/__version__.py,sha256=67tYZapqaNY9QXFm4kAOxyg6b6T1ttw2NjFPHfyCkkc,201 +dataproperty/_align.py,sha256=VQCp3HUN-rw5lDcG0CHwoQNwabSOwMF8Fpn52nHpQs8,535 +dataproperty/_align_getter.py,sha256=GV8rvnGaF8-8C6E7SNa3SsXw-gp80jR93knG_XDwcZQ,833 +dataproperty/_base.py,sha256=WfDF5FqUFRm9_Aw8T0H5AxyKyvaz4Fv3Z0x7lDzzLTM,2514 +dataproperty/_column.py,sha256=Y7Xn16Jtc8vBMcqarrulNVzV4A3-TkYOQxkGXmup4lw,11653 +dataproperty/_common.py,sha256=scfSVZRoBT74UIOYS99lZye06OUbT9347QpbxRhIi8M,1915 +dataproperty/_container.py,sha256=NT-zFw68PqCCV8wcK7sTuIKlnW3eStVA0gkiO0DcBkY,5130 +dataproperty/_converter.py,sha256=rEYWC1rcBIgi2WRM9PrLAycoOs9uSsYUsXaAlW5dWzM,3269 +dataproperty/_dataproperty.py,sha256=Mq8J1pcJIqI2PbOfqH0CUF0aUzGhJnfdlTuzpz8-5wU,11321 +dataproperty/_extractor.py,sha256=Rg_z5aKUGulUxi0Y3iGhLCEQ2nQpMYRbU8-Dd7XfyG4,25899 +dataproperty/_formatter.py,sha256=nqQkEhtYKfG6WskuuN8_0mw3tpGNov8kJ6VBK36VYUA,3000 +dataproperty/_function.py,sha256=h48XjTqYuXwFI1xeerFIIAlaWINxtLXEDw91ZuF_AuQ,3115 +dataproperty/_interface.py,sha256=nronY0GKDo5AkgXjM7wvpYY8cx5SmpxpBiDLLbW6NSY,626 +dataproperty/_line_break.py,sha256=FGjtuWKftOchoeJZJ9DxHJ9DUY0PPO_tPTiAM1e-Wck,114 +dataproperty/_preprocessor.py,sha256=7v-Py61jZK9SkNrpaHrmJLdwMbjumpsfzk6JU2PiThw,5467 +dataproperty/logger/__init__.py,sha256=2kFcgMA8P4-c51nShgJQsY31tbbLvvsfSGDLXTOj9ig,88 +dataproperty/logger/__pycache__/__init__.cpython-310.pyc,, +dataproperty/logger/__pycache__/_logger.cpython-310.pyc,, +dataproperty/logger/__pycache__/_null_logger.cpython-310.pyc,, +dataproperty/logger/_logger.py,sha256=edZ7M2Hf9zjSMr4iRi_IYAcf3l1EiLIVqhCEtf0AFHg,442 +dataproperty/logger/_null_logger.py,sha256=xWCR2KAa2aKAcpKi8DosfCOgaRMb_YXr9MKrK7xMD-A,1071 +dataproperty/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +dataproperty/typing.py,sha256=YhjN4wF_7uqG9tPUbFLFemWIzx3WgyJJFhTh62TyhJU,1403 diff --git a/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..1f37c02f2eb2e26b306202feaccb31e522b8b169 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.40.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..53de7838246225ad49df896210fdcf03ddf9b888 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/DataProperty-1.0.1.dist-info/top_level.txt @@ -0,0 +1 @@ +dataproperty diff --git a/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/LICENSE new file mode 
100644 index 0000000000000000000000000000000000000000..2f1b8e15e5627d92f0521605c9870bc8e5505cb4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2017-2021 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..c8905983e369893f68879f4cdfb7290d54d5f822 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/METADATA @@ -0,0 +1,46 @@ +Metadata-Version: 2.1 +Name: PyYAML +Version: 6.0.1 +Summary: YAML parser and emitter for Python +Home-page: https://pyyaml.org/ +Download-URL: https://pypi.org/project/PyYAML/ +Author: Kirill Simonov +Author-email: xi@resolvent.net +License: MIT +Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues +Project-URL: CI, https://github.com/yaml/pyyaml/actions +Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation +Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core +Project-URL: Source Code, https://github.com/yaml/pyyaml +Platform: Any +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Cython +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Requires-Python: >=3.6 +License-File: LICENSE + +YAML is a data serialization format designed for human readability +and interaction with scripting languages. PyYAML is a YAML parser +and emitter for Python. 
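+
+A minimal round-trip sketch (yaml.safe_load and yaml.safe_dump used here are
+the commonly recommended entry points):
+
+    import yaml
+
+    # parse a YAML document into plain Python objects
+    doc = yaml.safe_load("a: 1\nb: [2, 3]")
+    assert doc == {"a": 1, "b": [2, 3]}
+
+    # emit the objects back out as YAML text
+    print(yaml.safe_dump(doc))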
+ +PyYAML features a complete YAML 1.1 parser, Unicode support, pickle +support, capable extension API, and sensible error messages. PyYAML +supports standard YAML tags and provides Python-specific tags that +allow to represent an arbitrary Python object. + +PyYAML is applicable for a broad range of tasks from complex +configuration files to object serialization and persistence. diff --git a/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..90aeed85f892d7de7ae8ffad5477859e10a5cde1 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/RECORD @@ -0,0 +1,43 @@ +PyYAML-6.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +PyYAML-6.0.1.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101 +PyYAML-6.0.1.dist-info/METADATA,sha256=UNNF8-SzzwOKXVo-kV5lXUGH2_wDWMBmGxqISpp5HQk,2058 +PyYAML-6.0.1.dist-info/RECORD,, +PyYAML-6.0.1.dist-info/WHEEL,sha256=iZaXX0Td62Nww8bojl0E84uJHjT41csHPKZmbUBbJPs,152 +PyYAML-6.0.1.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11 +_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402 +_yaml/__pycache__/__init__.cpython-310.pyc,, +yaml/__init__.py,sha256=bhl05qSeO-1ZxlSRjGrvl2m9nrXb1n9-GQatTN0Mrqc,12311 +yaml/__pycache__/__init__.cpython-310.pyc,, +yaml/__pycache__/composer.cpython-310.pyc,, +yaml/__pycache__/constructor.cpython-310.pyc,, +yaml/__pycache__/cyaml.cpython-310.pyc,, +yaml/__pycache__/dumper.cpython-310.pyc,, +yaml/__pycache__/emitter.cpython-310.pyc,, +yaml/__pycache__/error.cpython-310.pyc,, +yaml/__pycache__/events.cpython-310.pyc,, +yaml/__pycache__/loader.cpython-310.pyc,, +yaml/__pycache__/nodes.cpython-310.pyc,, +yaml/__pycache__/parser.cpython-310.pyc,, +yaml/__pycache__/reader.cpython-310.pyc,, +yaml/__pycache__/representer.cpython-310.pyc,, +yaml/__pycache__/resolver.cpython-310.pyc,, +yaml/__pycache__/scanner.cpython-310.pyc,, +yaml/__pycache__/serializer.cpython-310.pyc,, +yaml/__pycache__/tokens.cpython-310.pyc,, +yaml/_yaml.cpython-310-x86_64-linux-gnu.so,sha256=_9iVrASatQgQSFXlKeCe2uK2TyKwk9nd61Cs_-fqAHM,2226000 +yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883 +yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639 +yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851 +yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837 +yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006 +yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533 +yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445 +yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061 +yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440 +yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495 +yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794 +yaml/representer.py,sha256=IuWP-cAW9sHKEnS0gCqSa894k1Bg4cgTxaDwIcbRQ-Y,14190 +yaml/resolver.py,sha256=9L-VYfm4mWHxUD1Vg4X7rjDRK_7VZd6b92wzq7Y2IKY,9004 +yaml/scanner.py,sha256=YEM3iLZSaQwXcQRg2l2R4MdT0zGP2F9eHkKGKnHyWQY,51279 +yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165 +yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573 diff --git a/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/WHEEL 
b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..2d1b4b87130cdd6e713250ed45eec87c87d0405f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.40.0) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64 +Tag: cp310-cp310-manylinux2014_x86_64 + diff --git a/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6475e911f628412049bc4090d86f23ac403adde --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/PyYAML-6.0.1.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_yaml +yaml diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..7082a2d5b9047bfc09589f387053e24ea490bc54 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2013-2019 Nikolay Kim and Andrew Svetlov + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..fc964525f05e8e34961f0398b1930b8dec64ef26 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/METADATA @@ -0,0 +1,128 @@ +Metadata-Version: 2.1 +Name: aiosignal +Version: 1.3.1 +Summary: aiosignal: a list of registered asynchronous callbacks +Home-page: https://github.com/aio-libs/aiosignal +Maintainer: aiohttp team +Maintainer-email: team@aiohttp.org +License: Apache 2.0 +Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby +Project-URL: CI: GitHub Actions, https://github.com/aio-libs/aiosignal/actions +Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/aiosignal +Project-URL: Docs: RTD, https://docs.aiosignal.org +Project-URL: GitHub: issues, https://github.com/aio-libs/aiosignal/issues +Project-URL: GitHub: repo, https://github.com/aio-libs/aiosignal +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Intended Audience :: Developers +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Operating System :: POSIX +Classifier: Operating System :: MacOS :: MacOS X +Classifier: Operating System :: Microsoft :: Windows +Classifier: Framework :: AsyncIO +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: frozenlist (>=1.1.0) + +========= +aiosignal +========= + +.. image:: https://github.com/aio-libs/aiosignal/workflows/CI/badge.svg + :target: https://github.com/aio-libs/aiosignal/actions?query=workflow%3ACI + :alt: GitHub status for master branch + +.. image:: https://codecov.io/gh/aio-libs/aiosignal/branch/master/graph/badge.svg + :target: https://codecov.io/gh/aio-libs/aiosignal + :alt: codecov.io status for master branch + +.. image:: https://badge.fury.io/py/aiosignal.svg + :target: https://pypi.org/project/aiosignal + :alt: Latest PyPI package version + +.. image:: https://readthedocs.org/projects/aiosignal/badge/?version=latest + :target: https://aiosignal.readthedocs.io/ + :alt: Latest Read The Docs + +.. image:: https://img.shields.io/discourse/topics?server=https%3A%2F%2Faio-libs.discourse.group%2F + :target: https://aio-libs.discourse.group/ + :alt: Discourse group for io-libs + +.. image:: https://badges.gitter.im/Join%20Chat.svg + :target: https://gitter.im/aio-libs/Lobby + :alt: Chat on Gitter + +Introduction +============ + +A project to manage callbacks in `asyncio` projects. + +``Signal`` is a list of registered asynchronous callbacks. + +The signal's life-cycle has two stages: after creation its content +could be filled by using standard list operations: ``sig.append()`` +etc. + +After you call ``sig.freeze()`` the signal is *frozen*: adding, removing +and dropping callbacks is forbidden. + +The only available operation is calling the previously registered +callbacks by using ``await sig.send(data)``. 
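+
+A minimal sketch of that life-cycle (assuming a ``Signal`` is constructed
+with an owner object and that callbacks receive the arguments passed to
+``send()``):
+
+.. code:: python
+
+    import asyncio
+
+    from aiosignal import Signal
+
+    async def on_event(value):
+        # each registered callback is awaited with the arguments given to send()
+        print("received", value)
+
+    async def main():
+        owner = object()          # any object may own the signal
+        sig = Signal(owner)
+        sig.append(on_event)      # register callbacks while the signal is unfrozen
+        sig.freeze()              # freeze the signal before sending
+        await sig.send(value=42)  # calls every registered callback
+
+    asyncio.run(main())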
+ +For concrete usage examples see the `Signals + +section of the `Web Server Advanced +` chapter of the `aiohttp +documentation`_. + + +Installation +------------ + +:: + + $ pip install aiosignal + +The library requires Python 3.6 or newer. + + +Documentation +============= + +https://aiosignal.readthedocs.io/ + +Communication channels +====================== + +*gitter chat* https://gitter.im/aio-libs/Lobby + +Requirements +============ + +- Python >= 3.6 +- frozenlist >= 1.0.0 + +License +======= + +``aiosignal`` is offered under the Apache 2 license. + +Source code +=========== + +The project is hosted on GitHub_ + +Please file an issue in the `bug tracker +`_ if you have found a bug +or have some suggestions to improve the library. + +.. _GitHub: https://github.com/aio-libs/aiosignal +.. _aiohttp documentation: https://docs.aiohttp.org/ diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ea4440e8647550ac24f031b8852c3d3c20ed5fe5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/RECORD @@ -0,0 +1,10 @@ +aiosignal-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +aiosignal-1.3.1.dist-info/LICENSE,sha256=b9UkPpLdf5jsacesN3co50kFcJ_1J6W_mNbQJjwE9bY,11332 +aiosignal-1.3.1.dist-info/METADATA,sha256=c0HRnlYzfXKztZPTFDlPfygizTherhG5WdwXlvco0Ug,4008 +aiosignal-1.3.1.dist-info/RECORD,, +aiosignal-1.3.1.dist-info/WHEEL,sha256=ZL1lC_LiPDNRgDnOl2taCMc83aPEUZgHHv2h-LDgdiM,92 +aiosignal-1.3.1.dist-info/top_level.txt,sha256=z45aNOKGDdrI1roqZY3BGXQ22kJFPHBmVdwtLYLtXC0,10 +aiosignal/__init__.py,sha256=zQNfFYRSd84bswvpFv8ZWjEr5DeYwV3LXbMSyo2222s,867 +aiosignal/__init__.pyi,sha256=xeCddYSS8fZAkz8S4HuKSR2IDe3N7RW_LKcXDPPA1Xk,311 +aiosignal/__pycache__/__init__.cpython-310.pyc,, +aiosignal/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..5e1f087ca1ac49327ef76b101df80489a03c2e7f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..ac6df3afe74a5fd43afc7ab7f8393571a495fdc5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/aiosignal-1.3.1.dist-info/top_level.txt @@ -0,0 +1 @@ +aiosignal diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE new file mode 100644 index 
0000000000000000000000000000000000000000..9ecdc7586d08805bc984539f6672476e86e538b6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2005-2021 Fredrik Johansson and mpmath contributors + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..994b48acdba5cd0fdfb28cd1fbb0a84ebf81cba5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/METADATA @@ -0,0 +1,233 @@ +Metadata-Version: 2.1 +Name: mpmath +Version: 1.3.0 +Summary: Python library for arbitrary-precision floating-point arithmetic +Home-page: http://mpmath.org/ +Author: Fredrik Johansson +Author-email: fredrik.johansson@gmail.com +License: BSD +Project-URL: Source, https://github.com/fredrik-johansson/mpmath +Project-URL: Tracker, https://github.com/fredrik-johansson/mpmath/issues +Project-URL: Documentation, http://mpmath.org/doc/current/ +Classifier: License :: OSI Approved :: BSD License +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +License-File: LICENSE +Provides-Extra: develop +Requires-Dist: pytest (>=4.6) ; extra == 'develop' +Requires-Dist: pycodestyle ; extra == 'develop' +Requires-Dist: pytest-cov ; extra == 'develop' +Requires-Dist: 
codecov ; extra == 'develop' +Requires-Dist: wheel ; extra == 'develop' +Provides-Extra: docs +Requires-Dist: sphinx ; extra == 'docs' +Provides-Extra: gmpy +Requires-Dist: gmpy2 (>=2.1.0a4) ; (platform_python_implementation != "PyPy") and extra == 'gmpy' +Provides-Extra: tests +Requires-Dist: pytest (>=4.6) ; extra == 'tests' + +mpmath +====== + +|pypi version| |Build status| |Code coverage status| |Zenodo Badge| + +.. |pypi version| image:: https://img.shields.io/pypi/v/mpmath.svg + :target: https://pypi.python.org/pypi/mpmath +.. |Build status| image:: https://github.com/fredrik-johansson/mpmath/workflows/test/badge.svg + :target: https://github.com/fredrik-johansson/mpmath/actions?workflow=test +.. |Code coverage status| image:: https://codecov.io/gh/fredrik-johansson/mpmath/branch/master/graph/badge.svg + :target: https://codecov.io/gh/fredrik-johansson/mpmath +.. |Zenodo Badge| image:: https://zenodo.org/badge/2934512.svg + :target: https://zenodo.org/badge/latestdoi/2934512 + +A Python library for arbitrary-precision floating-point arithmetic. + +Website: http://mpmath.org/ +Main author: Fredrik Johansson + +Mpmath is free software released under the New BSD License (see the +LICENSE file for details) + +0. History and credits +---------------------- + +The following people (among others) have contributed major patches +or new features to mpmath: + +* Pearu Peterson +* Mario Pernici +* Ondrej Certik +* Vinzent Steinberg +* Nimish Telang +* Mike Taschuk +* Case Van Horsen +* Jorn Baayen +* Chris Smith +* Juan Arias de Reyna +* Ioannis Tziakos +* Aaron Meurer +* Stefan Krastanov +* Ken Allen +* Timo Hartmann +* Sergey B Kirpichev +* Kris Kuhlman +* Paul Masson +* Michael Kagalenko +* Jonathan Warner +* Max Gaukler +* Guillermo Navas-Palencia +* Nike Dattani + +Numerous other people have contributed by reporting bugs, +requesting new features, or suggesting improvements to the +documentation. + +For a detailed changelog, including individual contributions, +see the CHANGES file. + +Fredrik's work on mpmath during summer 2008 was sponsored by Google +as part of the Google Summer of Code program. + +Fredrik's work on mpmath during summer 2009 was sponsored by the +American Institute of Mathematics under the support of the National Science +Foundation Grant No. 0757627 (FRG: L-functions and Modular Forms). + +Any opinions, findings, and conclusions or recommendations expressed in this +material are those of the author(s) and do not necessarily reflect the +views of the sponsors. + +Credit also goes to: + +* The authors of the GMP library and the Python wrapper + gmpy, enabling mpmath to become much faster at + high precision +* The authors of MPFR, pari/gp, MPFUN, and other arbitrary- + precision libraries, whose documentation has been helpful + for implementing many of the algorithms in mpmath +* Wikipedia contributors; Abramowitz & Stegun; Gradshteyn & Ryzhik; + Wolfram Research for MathWorld and the Wolfram Functions site. + These are the main references used for special functions + implementations. 
+* George Brandl for developing the Sphinx documentation tool + used to build mpmath's documentation + +Release history: + +* Version 1.3.0 released on March 7, 2023 +* Version 1.2.0 released on February 1, 2021 +* Version 1.1.0 released on December 11, 2018 +* Version 1.0.0 released on September 27, 2017 +* Version 0.19 released on June 10, 2014 +* Version 0.18 released on December 31, 2013 +* Version 0.17 released on February 1, 2011 +* Version 0.16 released on September 24, 2010 +* Version 0.15 released on June 6, 2010 +* Version 0.14 released on February 5, 2010 +* Version 0.13 released on August 13, 2009 +* Version 0.12 released on June 9, 2009 +* Version 0.11 released on January 26, 2009 +* Version 0.10 released on October 15, 2008 +* Version 0.9 released on August 23, 2008 +* Version 0.8 released on April 20, 2008 +* Version 0.7 released on March 12, 2008 +* Version 0.6 released on January 13, 2008 +* Version 0.5 released on November 24, 2007 +* Version 0.4 released on November 3, 2007 +* Version 0.3 released on October 5, 2007 +* Version 0.2 released on October 2, 2007 +* Version 0.1 released on September 27, 2007 + +1. Download & installation +-------------------------- + +Mpmath requires Python 2.7 or 3.5 (or later versions). It has been tested +with CPython 2.7, 3.5 through 3.7 and for PyPy. + +The latest release of mpmath can be downloaded from the mpmath +website and from https://github.com/fredrik-johansson/mpmath/releases + +It should also be available in the Python Package Index at +https://pypi.python.org/pypi/mpmath + +To install latest release of Mpmath with pip, simply run + +``pip install mpmath`` + +Or unpack the mpmath archive and run + +``python setup.py install`` + +Mpmath can also be installed using + +``python -m easy_install mpmath`` + +The latest development code is available from +https://github.com/fredrik-johansson/mpmath + +See the main documentation for more detailed instructions. + +2. Running tests +---------------- + +The unit tests in mpmath/tests/ can be run via the script +runtests.py, but it is recommended to run them with py.test +(https://pytest.org/), especially +to generate more useful reports in case there are failures. + +You may also want to check out the demo scripts in the demo +directory. + +The master branch is automatically tested by Travis CI. + +3. Documentation +---------------- + +Documentation in reStructuredText format is available in the +doc directory included with the source package. These files +are human-readable, but can be compiled to prettier HTML using +the build.py script (requires Sphinx, http://sphinx.pocoo.org/). + +See setup.txt in the documentation for more information. + +The most recent documentation is also available in HTML format: + +http://mpmath.org/doc/current/ + +4. Known problems +----------------- + +Mpmath is a work in progress. Major issues include: + +* Some functions may return incorrect values when given extremely + large arguments or arguments very close to singularities. + +* Directed rounding works for arithmetic operations. It is implemented + heuristically for other operations, and their results may be off by one + or two units in the last place (even if otherwise accurate). + +* Some IEEE 754 features are not available. Infinities and NaN are + partially supported; denormal rounding is currently not available + at all. + +* The interface for switching precision and rounding is not finalized. + The current method is not threadsafe. + +5. 
Help and bug reports +----------------------- + +General questions and comments can be sent to the mpmath mailinglist, +mpmath@googlegroups.com + +You can also report bugs and send patches to the mpmath issue tracker, +https://github.com/fredrik-johansson/mpmath/issues diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..21bdf458683495b2e0d7f52464d33fdc500333e7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/RECORD @@ -0,0 +1,180 @@ +mpmath-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +mpmath-1.3.0.dist-info/LICENSE,sha256=wmyugdpFCOXiSZhXd6M4IfGDIj67dNf4z7-Q_n7vL7c,1537 +mpmath-1.3.0.dist-info/METADATA,sha256=RLZupES5wNGa6UgV01a_BHrmtoDBkmi1wmVofNaoFAY,8630 +mpmath-1.3.0.dist-info/RECORD,, +mpmath-1.3.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92 +mpmath-1.3.0.dist-info/top_level.txt,sha256=BUVWrh8EVlkOhM1n3X9S8msTaVcC-3s6Sjt60avHYus,7 +mpmath/__init__.py,sha256=skFYTSwfwDBLChAV6pI3SdewgAQR3UBtyrfIK_Jdn-g,8765 +mpmath/__pycache__/__init__.cpython-310.pyc,, +mpmath/__pycache__/ctx_base.cpython-310.pyc,, +mpmath/__pycache__/ctx_fp.cpython-310.pyc,, +mpmath/__pycache__/ctx_iv.cpython-310.pyc,, +mpmath/__pycache__/ctx_mp.cpython-310.pyc,, +mpmath/__pycache__/ctx_mp_python.cpython-310.pyc,, +mpmath/__pycache__/function_docs.cpython-310.pyc,, +mpmath/__pycache__/identification.cpython-310.pyc,, +mpmath/__pycache__/math2.cpython-310.pyc,, +mpmath/__pycache__/rational.cpython-310.pyc,, +mpmath/__pycache__/usertools.cpython-310.pyc,, +mpmath/__pycache__/visualization.cpython-310.pyc,, +mpmath/calculus/__init__.py,sha256=UAgCIJ1YmaeyTqpNzjBlCZGeIzLtUZMEEpl99VWNjus,162 +mpmath/calculus/__pycache__/__init__.cpython-310.pyc,, +mpmath/calculus/__pycache__/approximation.cpython-310.pyc,, +mpmath/calculus/__pycache__/calculus.cpython-310.pyc,, +mpmath/calculus/__pycache__/differentiation.cpython-310.pyc,, +mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc,, +mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc,, +mpmath/calculus/__pycache__/odes.cpython-310.pyc,, +mpmath/calculus/__pycache__/optimization.cpython-310.pyc,, +mpmath/calculus/__pycache__/polynomials.cpython-310.pyc,, +mpmath/calculus/__pycache__/quadrature.cpython-310.pyc,, +mpmath/calculus/approximation.py,sha256=vyzu3YI6r63Oq1KFHrQz02mGXAcH23emqNYhJuUaFZ4,8817 +mpmath/calculus/calculus.py,sha256=A0gSp0hxSyEDfugJViY3CeWalF-vK701YftzrjSQzQ4,112 +mpmath/calculus/differentiation.py,sha256=2L6CBj8xtX9iip98NPbKsLtwtRjxi571wYmTMHFeL90,20226 +mpmath/calculus/extrapolation.py,sha256=xM0rvk2DFEF4iR1Jhl-Y3aS93iW9VVJX7y9IGpmzC-A,73306 +mpmath/calculus/inverselaplace.py,sha256=5-pn8N_t0PtgBTXixsXZ4xxrihK2J5gYsVfTKfDx4gA,36056 +mpmath/calculus/odes.py,sha256=gaHiw7IJjsONNTAa6izFPZpmcg9uyTp8MULnGdzTIGo,9908 +mpmath/calculus/optimization.py,sha256=bKnShXElBOmVOIOlFeksDsYCp9fYSmYwKmXDt0z26MM,32856 +mpmath/calculus/polynomials.py,sha256=D16BhU_SHbVi06IxNwABHR-H77IylndNsN3muPTuFYs,7877 +mpmath/calculus/quadrature.py,sha256=n-avtS8E43foV-5tr5lofgOBaiMUYE8AJjQcWI9QcKk,42432 +mpmath/ctx_base.py,sha256=rfjmfMyA55x8R_cWFINUwWVTElfZmyx5erKDdauSEVw,15985 +mpmath/ctx_fp.py,sha256=ctUjx_NoU0iFWk05cXDYCL2ZtLZOlWs1n6Zao3pbG2g,6572 +mpmath/ctx_iv.py,sha256=tqdMr-GDfkZk1EhoGeCAajy7pQv-RWtrVqhYjfI8r4g,17211 +mpmath/ctx_mp.py,sha256=d3r4t7xHNqSFtmqsA9Btq1Npy3WTM-pcM2_jeCyECxY,49452 
+mpmath/ctx_mp_python.py,sha256=3olYWo4lk1SnQ0A_IaZ181qqG8u5pxGat_v-L4Qtn3Y,37815 +mpmath/function_docs.py,sha256=g4PP8n6ILXmHcLyA50sxK6Tmp_Z4_pRN-wDErU8D1i4,283512 +mpmath/functions/__init__.py,sha256=YXVdhqv-6LKm6cr5xxtTNTtuD9zDPKGQl8GmS0xz2xo,330 +mpmath/functions/__pycache__/__init__.cpython-310.pyc,, +mpmath/functions/__pycache__/bessel.cpython-310.pyc,, +mpmath/functions/__pycache__/elliptic.cpython-310.pyc,, +mpmath/functions/__pycache__/expintegrals.cpython-310.pyc,, +mpmath/functions/__pycache__/factorials.cpython-310.pyc,, +mpmath/functions/__pycache__/functions.cpython-310.pyc,, +mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc,, +mpmath/functions/__pycache__/orthogonal.cpython-310.pyc,, +mpmath/functions/__pycache__/qfunctions.cpython-310.pyc,, +mpmath/functions/__pycache__/rszeta.cpython-310.pyc,, +mpmath/functions/__pycache__/signals.cpython-310.pyc,, +mpmath/functions/__pycache__/theta.cpython-310.pyc,, +mpmath/functions/__pycache__/zeta.cpython-310.pyc,, +mpmath/functions/__pycache__/zetazeros.cpython-310.pyc,, +mpmath/functions/bessel.py,sha256=dUPLu8frlK-vmf3-irX_7uvwyw4xccv6EIizmIZ88kM,37938 +mpmath/functions/elliptic.py,sha256=qz0yVMb4lWEeOTDL_DWz5u5awmGIPKAsuZFJXgwHJNU,42237 +mpmath/functions/expintegrals.py,sha256=75X_MRdYc1F_X73bgNiOJqwRlS2hqAzcFLl3RM2tCDc,11644 +mpmath/functions/factorials.py,sha256=8_6kCR7e4k1GwxiAOJu0NRadeF4jA28qx4hidhu4ILk,5273 +mpmath/functions/functions.py,sha256=ub2JExvqzCWLkm5yAm72Fr6fdWmZZUknq9_3w9MEigI,18100 +mpmath/functions/hypergeometric.py,sha256=Z0OMAMC4ylK42n_SnamyFVnUx6zHLyCLCoJDSZ1JrHY,51570 +mpmath/functions/orthogonal.py,sha256=FabkxKfBoSseA5flWu1a3re-2BYaew9augqIsT8LaLw,16097 +mpmath/functions/qfunctions.py,sha256=a3EHGKQt_jMd4x9I772Jz-TGFnGY-arWqPvZGz9QSe0,7633 +mpmath/functions/rszeta.py,sha256=yuUVp4ilIyDmXyE3WTBxDDjwfEJNypJnbPS-xPH5How,46184 +mpmath/functions/signals.py,sha256=ELotwQaW1CDpv-eeJzOZ5c23NhfaZcj9_Gkb3psvS0Q,703 +mpmath/functions/theta.py,sha256=KggOocczoMG6_HMoal4oEP7iZ4SKOou9JFE-WzY2r3M,37320 +mpmath/functions/zeta.py,sha256=ue7JY7GXA0oX8q08sQJl2CSRrZ7kOt8HsftpVjnTwrE,36410 +mpmath/functions/zetazeros.py,sha256=uq6TVyZBcY2MLX7VSdVfn0TOkowBLM9fXtnySEwaNzw,30858 +mpmath/identification.py,sha256=7aMdngRAaeL_MafDUNbmEIlGQSklHDZ8pmPFt-OLgkw,29253 +mpmath/libmp/__init__.py,sha256=UCDjLZw4brbklaCmSixCcPdLdHkz8sF_-6F_wr0duAg,3790 +mpmath/libmp/__pycache__/__init__.cpython-310.pyc,, +mpmath/libmp/__pycache__/backend.cpython-310.pyc,, +mpmath/libmp/__pycache__/gammazeta.cpython-310.pyc,, +mpmath/libmp/__pycache__/libelefun.cpython-310.pyc,, +mpmath/libmp/__pycache__/libhyper.cpython-310.pyc,, +mpmath/libmp/__pycache__/libintmath.cpython-310.pyc,, +mpmath/libmp/__pycache__/libmpc.cpython-310.pyc,, +mpmath/libmp/__pycache__/libmpf.cpython-310.pyc,, +mpmath/libmp/__pycache__/libmpi.cpython-310.pyc,, +mpmath/libmp/backend.py,sha256=26A8pUkaGov26vrrFNQVyWJ5LDtK8sl3UHrYLecaTjA,3360 +mpmath/libmp/gammazeta.py,sha256=Xqdw6PMoswDaSca_sOs-IglRuk3fb8c9p43M_lbcrlc,71469 +mpmath/libmp/libelefun.py,sha256=joBZP4FOdxPfieWso1LPtSr6dHydpG_LQiF_bYQYWMg,43861 +mpmath/libmp/libhyper.py,sha256=J9fmdDF6u27EcssEWvBuVaAa3hFjPvPN1SgRgu1dEbc,36624 +mpmath/libmp/libintmath.py,sha256=aIRT0rkUZ_sdGQf3TNCLd-pBMvtQWjssbvFLfK7U0jc,16688 +mpmath/libmp/libmpc.py,sha256=KBndUjs5YVS32-Id3fflDfYgpdW1Prx6zfo8Ez5Qbrs,26875 +mpmath/libmp/libmpf.py,sha256=vpP0kNVkScbCVoZogJ4Watl4I7Ce0d4dzHVjfVe57so,45021 +mpmath/libmp/libmpi.py,sha256=u0I5Eiwkqa-4-dXETi5k7MuaxBeZbvCAPFtl93U9YF0,27622 
+mpmath/math2.py,sha256=O5Dglg81SsW0wfHDUJcXOD8-cCaLvbVIvyw0sVmRbpI,18561 +mpmath/matrices/__init__.py,sha256=ETzGDciYbq9ftiKwaMbJ15EI-KNXHrzRb-ZHehhqFjs,94 +mpmath/matrices/__pycache__/__init__.cpython-310.pyc,, +mpmath/matrices/__pycache__/calculus.cpython-310.pyc,, +mpmath/matrices/__pycache__/eigen.cpython-310.pyc,, +mpmath/matrices/__pycache__/eigen_symmetric.cpython-310.pyc,, +mpmath/matrices/__pycache__/linalg.cpython-310.pyc,, +mpmath/matrices/__pycache__/matrices.cpython-310.pyc,, +mpmath/matrices/calculus.py,sha256=PNRq-p2nxgT-fzC54K2depi8ddhdx6Q86G8qpUiHeUY,18609 +mpmath/matrices/eigen.py,sha256=GbDXI3CixzEdXxr1G86uUWkAngAvd-05MmSQ-Tsu_5k,24394 +mpmath/matrices/eigen_symmetric.py,sha256=FPKPeQr1cGYw6Y6ea32a1YdEWQDLP6JlQHEA2WfNLYg,58534 +mpmath/matrices/linalg.py,sha256=04C3ijzMFom7ob5fXBCDfyPPdo3BIboIeE8x2A6vqF0,26958 +mpmath/matrices/matrices.py,sha256=o78Eq62EHQnxcsR0LBoWDEGREOoN4L2iDM1q3dQrw0o,32331 +mpmath/rational.py,sha256=64d56fvZXngYZT7nOAHeFRUX77eJ1A0R3rpfWBU-mSo,5976 +mpmath/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +mpmath/tests/__pycache__/__init__.cpython-310.pyc,, +mpmath/tests/__pycache__/extratest_gamma.cpython-310.pyc,, +mpmath/tests/__pycache__/extratest_zeta.cpython-310.pyc,, +mpmath/tests/__pycache__/runtests.cpython-310.pyc,, +mpmath/tests/__pycache__/test_basic_ops.cpython-310.pyc,, +mpmath/tests/__pycache__/test_bitwise.cpython-310.pyc,, +mpmath/tests/__pycache__/test_calculus.cpython-310.pyc,, +mpmath/tests/__pycache__/test_compatibility.cpython-310.pyc,, +mpmath/tests/__pycache__/test_convert.cpython-310.pyc,, +mpmath/tests/__pycache__/test_diff.cpython-310.pyc,, +mpmath/tests/__pycache__/test_division.cpython-310.pyc,, +mpmath/tests/__pycache__/test_eigen.cpython-310.pyc,, +mpmath/tests/__pycache__/test_eigen_symmetric.cpython-310.pyc,, +mpmath/tests/__pycache__/test_elliptic.cpython-310.pyc,, +mpmath/tests/__pycache__/test_fp.cpython-310.pyc,, +mpmath/tests/__pycache__/test_functions.cpython-310.pyc,, +mpmath/tests/__pycache__/test_functions2.cpython-310.pyc,, +mpmath/tests/__pycache__/test_gammazeta.cpython-310.pyc,, +mpmath/tests/__pycache__/test_hp.cpython-310.pyc,, +mpmath/tests/__pycache__/test_identify.cpython-310.pyc,, +mpmath/tests/__pycache__/test_interval.cpython-310.pyc,, +mpmath/tests/__pycache__/test_levin.cpython-310.pyc,, +mpmath/tests/__pycache__/test_linalg.cpython-310.pyc,, +mpmath/tests/__pycache__/test_matrices.cpython-310.pyc,, +mpmath/tests/__pycache__/test_mpmath.cpython-310.pyc,, +mpmath/tests/__pycache__/test_ode.cpython-310.pyc,, +mpmath/tests/__pycache__/test_pickle.cpython-310.pyc,, +mpmath/tests/__pycache__/test_power.cpython-310.pyc,, +mpmath/tests/__pycache__/test_quad.cpython-310.pyc,, +mpmath/tests/__pycache__/test_rootfinding.cpython-310.pyc,, +mpmath/tests/__pycache__/test_special.cpython-310.pyc,, +mpmath/tests/__pycache__/test_str.cpython-310.pyc,, +mpmath/tests/__pycache__/test_summation.cpython-310.pyc,, +mpmath/tests/__pycache__/test_trig.cpython-310.pyc,, +mpmath/tests/__pycache__/test_visualization.cpython-310.pyc,, +mpmath/tests/__pycache__/torture.cpython-310.pyc,, +mpmath/tests/extratest_gamma.py,sha256=xidhXUelILcxtiPGoTBHjqUOKIJzEaZ_v3nntGQyWZQ,7228 +mpmath/tests/extratest_zeta.py,sha256=sg10j9RhjBpV2EdUqyYhGV2ERWvM--EvwwGIz6HTmlw,1003 +mpmath/tests/runtests.py,sha256=7NUV82F3K_5AhU8mCLUFf5OibtT7uloFCwPyM3l71wM,5189 +mpmath/tests/test_basic_ops.py,sha256=dsB8DRG-GrPzBaZ-bIauYabaeqXbfqBo9SIP9BqcTSs,15348 
+mpmath/tests/test_bitwise.py,sha256=-nLYhgQbhDza3SQM63BhktYntACagqMYx9ib3dPnTKM,7686 +mpmath/tests/test_calculus.py,sha256=4oxtNfMpO4RLLoOzrv7r9-h8BcqfBsJIE6UpsHe7c4w,9187 +mpmath/tests/test_compatibility.py,sha256=_t3ASZ3jhfAMnN1voWX7PDNIDzn-3PokkJGIdT1x7y0,2306 +mpmath/tests/test_convert.py,sha256=JPcDcTJIWh5prIxjx5DM1aNWgqlUoF2KpHvAgK3uHi4,8834 +mpmath/tests/test_diff.py,sha256=qjiF8NxQ8vueuZ5ZHGPQ-kjcj_I7Jh_fEdFtaA8DzEI,2466 +mpmath/tests/test_division.py,sha256=6lUeZfmaBWvvszdqlWLMHgXPjVsxvW1WZpd4-jFWCpU,5340 +mpmath/tests/test_eigen.py,sha256=2mnqVATGbsJkvSVHPpitfAk881twFfb3LsO3XikV9Hs,3905 +mpmath/tests/test_eigen_symmetric.py,sha256=v0VimCicIU2owASDMBaP-t-30uq-pXcsglt95KBtNO4,8778 +mpmath/tests/test_elliptic.py,sha256=Kjiwq9Bb6N_OOzzWewGQ1M_PMa7vRs42V0t90gloZxo,26225 +mpmath/tests/test_fp.py,sha256=AJo0FTyH4BuUnUsv176LD956om308KGYndy-b54KGxM,89997 +mpmath/tests/test_functions.py,sha256=b47VywdomoOX6KmMmz9-iv2IqVIydwKSuUw2pWlFHrY,30955 +mpmath/tests/test_functions2.py,sha256=vlw2RWhL1oTcifnOMDx1a_YzN96UgNNIE5STeKRv1HY,96990 +mpmath/tests/test_gammazeta.py,sha256=AB34O0DV7AlEf9Z4brnCadeQU5-uAwhWRw5FZas65DA,27917 +mpmath/tests/test_hp.py,sha256=6hcENu6Te2klPEiTSeLBIRPlH7PADlJwFKbx8xpnOhg,10461 +mpmath/tests/test_identify.py,sha256=lGUIPfrB2paTg0cFUo64GmMzF77F9gs9FQjX7gxGHV8,692 +mpmath/tests/test_interval.py,sha256=TjYd7a9ca6iRJiLjw06isLeZTuGoGAPmgleDZ0cYfJ0,17527 +mpmath/tests/test_levin.py,sha256=P8M11yV1dj_gdSNv5xuwCzFiF86QyRDtPMjURy6wJ28,5090 +mpmath/tests/test_linalg.py,sha256=miKEnwB8iwWV13hi1bF1cg3hgB4rTKOR0fvDVfWmXds,10440 +mpmath/tests/test_matrices.py,sha256=qyA4Ml2CvNvW034lzB01G6wVgNr7UrgZqh2wkMXtpzM,7944 +mpmath/tests/test_mpmath.py,sha256=LVyJUeofiaxW-zLKWVBCz59L9UQsjlW0Ts9_oBiEv_4,196 +mpmath/tests/test_ode.py,sha256=zAxexBH4fnmFNO4bvEHbug1NJWC5zqfFaVDlYijowkY,1822 +mpmath/tests/test_pickle.py,sha256=Y8CKmDLFsJHUqG8CDaBw5ilrPP4YT1xijVduLpQ7XFE,401 +mpmath/tests/test_power.py,sha256=sz_K02SmNxpa6Kb1uJLN_N4tXTJGdQ___vPRshEN7Gk,5227 +mpmath/tests/test_quad.py,sha256=49Ltft0vZ_kdKLL5s-Kj-BzAVoF5LPVEUeNUzdOkghI,3893 +mpmath/tests/test_rootfinding.py,sha256=umQegEaKHmYOEl5jEyoD-VLKDtXsTJJkepKEr4c0dC0,3132 +mpmath/tests/test_special.py,sha256=YbMIoMIkJEvvKYIzS0CXthJFG0--j6un7-tcE6b7FPM,2848 +mpmath/tests/test_str.py,sha256=0WsGD9hMPRi8zcuYMA9Cu2mOvQiCFskPwMsMf8lBDK4,544 +mpmath/tests/test_summation.py,sha256=fdNlsvRVOsbWxbhlyDLDaEO2S8kTJrRMKIvB5-aNci0,2035 +mpmath/tests/test_trig.py,sha256=zPtkIEnZaThxcWur4k7BX8-2Jmj-AhO191Svv7ANYUU,4799 +mpmath/tests/test_visualization.py,sha256=1PqtkoUx-WsKYgTRiu5o9pBc85kwhf1lzU2eobDQCJM,944 +mpmath/tests/torture.py,sha256=LD95oES7JY2KroELK-m-jhvtbvZaKChnt0Cq7kFMNCw,7868 +mpmath/usertools.py,sha256=a-TDw7XSRsPdBEffxOooDV4WDFfuXnO58P75dcAD87I,3029 +mpmath/visualization.py,sha256=pnnbjcd9AhFVRBZavYX5gjx4ytK_kXoDDisYR6EpXhs,10627 diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..57e3d840d59a650ac5bccbad5baeec47d155f0ad --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.38.4) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt new file mode 100644 index 
0000000000000000000000000000000000000000..dda7c273a8dd1c6adffa9d2d9901e0ce6876f4ac --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mpmath-1.3.0.dist-info/top_level.txt @@ -0,0 +1 @@ +mpmath diff --git a/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/LICENSE.txt b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..100b4bffb00abd785f61fca42fea2ab74a70d7f7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/LICENSE.txt @@ -0,0 +1,37 @@ +NetworkX is distributed with the 3-clause BSD license. + +:: + + Copyright (C) 2004-2024, NetworkX Developers + Aric Hagberg + Dan Schult + Pieter Swart + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of the NetworkX Developers nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..fa5e70540b98a3d0ef30c33afa40a045ca60afbc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/METADATA @@ -0,0 +1,133 @@ +Metadata-Version: 2.1 +Name: networkx +Version: 3.3 +Summary: Python package for creating and manipulating graphs and networks +Author-email: Aric Hagberg +Maintainer-email: NetworkX Developers +Project-URL: Homepage, https://networkx.org/ +Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues +Project-URL: Documentation, https://networkx.org/documentation/stable/ +Project-URL: Source Code, https://github.com/networkx/networkx +Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math +Platform: Linux +Platform: Mac OSX +Platform: Windows +Platform: Unix +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: BSD License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Scientific/Engineering :: Bio-Informatics +Classifier: Topic :: Scientific/Engineering :: Information Analysis +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Physics +Requires-Python: >=3.10 +Description-Content-Type: text/x-rst +License-File: LICENSE.txt +Provides-Extra: default +Requires-Dist: numpy >=1.23 ; extra == 'default' +Requires-Dist: scipy !=1.11.0,!=1.11.1,>=1.9 ; extra == 'default' +Requires-Dist: matplotlib >=3.6 ; extra == 'default' +Requires-Dist: pandas >=1.4 ; extra == 'default' +Provides-Extra: developer +Requires-Dist: changelist ==0.5 ; extra == 'developer' +Requires-Dist: pre-commit >=3.2 ; extra == 'developer' +Requires-Dist: mypy >=1.1 ; extra == 'developer' +Requires-Dist: rtoml ; extra == 'developer' +Provides-Extra: doc +Requires-Dist: sphinx >=7 ; extra == 'doc' +Requires-Dist: pydata-sphinx-theme >=0.14 ; extra == 'doc' +Requires-Dist: sphinx-gallery >=0.14 ; extra == 'doc' +Requires-Dist: numpydoc >=1.7 ; extra == 'doc' +Requires-Dist: pillow >=9.4 ; extra == 'doc' +Requires-Dist: texext >=0.6.7 ; extra == 'doc' +Requires-Dist: myst-nb >=1.0 ; extra == 'doc' +Provides-Extra: extra +Requires-Dist: lxml >=4.6 ; extra == 'extra' +Requires-Dist: pygraphviz >=1.12 ; extra == 'extra' +Requires-Dist: pydot >=2.0 ; extra == 'extra' +Requires-Dist: sympy >=1.10 ; extra == 'extra' +Provides-Extra: test +Requires-Dist: pytest >=7.2 ; extra == 'test' +Requires-Dist: pytest-cov >=4.0 ; extra == 'test' + +NetworkX +======== + + +.. image:: https://github.com/networkx/networkx/workflows/test/badge.svg?branch=main + :target: https://github.com/networkx/networkx/actions?query=workflow%3A%22test%22 + +.. image:: https://codecov.io/gh/networkx/networkx/branch/main/graph/badge.svg + :target: https://app.codecov.io/gh/networkx/networkx/branch/main + +.. 
image:: https://img.shields.io/github/labels/networkx/networkx/Good%20First%20Issue?color=green&label=Contribute%20&style=flat-square
+   :target: https://github.com/networkx/networkx/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22
+
+
+NetworkX is a Python package for the creation, manipulation,
+and study of the structure, dynamics, and functions
+of complex networks.
+
+- **Website (including documentation):** https://networkx.org
+- **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss
+- **Source:** https://github.com/networkx/networkx
+- **Bug reports:** https://github.com/networkx/networkx/issues
+- **Report a security vulnerability:** https://tidelift.com/security
+- **Tutorial:** https://networkx.org/documentation/latest/tutorial.html
+- **GitHub Discussions:** https://github.com/networkx/networkx/discussions
+
+Simple example
+--------------
+
+Find the shortest path between two nodes in an undirected graph:
+
+.. code:: pycon
+
+    >>> import networkx as nx
+    >>> G = nx.Graph()
+    >>> G.add_edge("A", "B", weight=4)
+    >>> G.add_edge("B", "D", weight=2)
+    >>> G.add_edge("A", "C", weight=3)
+    >>> G.add_edge("C", "D", weight=4)
+    >>> nx.shortest_path(G, "A", "D", weight="weight")
+    ['A', 'B', 'D']
+
+Install
+-------
+
+Install the latest version of NetworkX::
+
+    $ pip install networkx
+
+Install with all optional dependencies::
+
+    $ pip install networkx[all]
+
+For additional details, please see `INSTALL.rst`.
+
+Bugs
+----
+
+Please report any bugs that you find `here <https://github.com/networkx/networkx/issues>`_.
+Or, even better, fork the repository on `GitHub <https://github.com/networkx/networkx>`_
+and create a pull request (PR). We welcome all changes, big or small, and we
+will help you make the PR if you are new to `git` (just ask on the issue and/or
+see `CONTRIBUTING.rst`).
+
+License
+-------
+
+Released under the 3-Clause BSD license (see `LICENSE.txt`)::
+
+   Copyright (C) 2004-2024 NetworkX Developers
+   Aric Hagberg
+   Dan Schult
+   Pieter Swart diff --git a/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..3587db22ea69bf0ea0bb543aa14af6ddf738768f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/RECORD @@ -0,0 +1,1149 @@
+networkx-3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+networkx-3.3.dist-info/LICENSE.txt,sha256=W0M7kPdV65u9Bv7_HRpPXyMsUgihhWlBmeRfqV12J5I,1763
+networkx-3.3.dist-info/METADATA,sha256=YQezeWnohXGh2TPdJ8pc1uuJaJ0gu8Q6rifuJxSHL1A,5131
+networkx-3.3.dist-info/RECORD,,
+networkx-3.3.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+networkx-3.3.dist-info/entry_points.txt,sha256=b0FW-zm-m9itB-Zkm7w_8c9yX9WGGTg-r_N_A32PAGs,87
+networkx-3.3.dist-info/top_level.txt,sha256=s3Mk-7KOlu-kD39w8Xg_KXoP5Z_MVvgB-upkyuOE4Hk,9
+networkx/__init__.py,sha256=gVLXWn6YmX68Cl9mmGpqPZLtiIIVUqGmUyztqRfhry4,1106
+networkx/__pycache__/__init__.cpython-310.pyc,,
+networkx/__pycache__/conftest.cpython-310.pyc,,
+networkx/__pycache__/convert.cpython-310.pyc,,
+networkx/__pycache__/convert_matrix.cpython-310.pyc,,
+networkx/__pycache__/exception.cpython-310.pyc,,
+networkx/__pycache__/lazy_imports.cpython-310.pyc,,
+networkx/__pycache__/relabel.cpython-310.pyc,,
+networkx/algorithms/__init__.py,sha256=oij1HDNcE7GhTPAtuHYT8eGZdH4K_vYaha51X5XoUCY,6559
+networkx/algorithms/__pycache__/__init__.cpython-310.pyc,,
+networkx/algorithms/__pycache__/asteroidal.cpython-310.pyc,,
+networkx/algorithms/__pycache__/boundary.cpython-310.pyc,, +networkx/algorithms/__pycache__/bridges.cpython-310.pyc,, +networkx/algorithms/__pycache__/broadcasting.cpython-310.pyc,, +networkx/algorithms/__pycache__/chains.cpython-310.pyc,, +networkx/algorithms/__pycache__/chordal.cpython-310.pyc,, +networkx/algorithms/__pycache__/clique.cpython-310.pyc,, +networkx/algorithms/__pycache__/cluster.cpython-310.pyc,, +networkx/algorithms/__pycache__/communicability_alg.cpython-310.pyc,, +networkx/algorithms/__pycache__/core.cpython-310.pyc,, +networkx/algorithms/__pycache__/covering.cpython-310.pyc,, +networkx/algorithms/__pycache__/cuts.cpython-310.pyc,, +networkx/algorithms/__pycache__/cycles.cpython-310.pyc,, +networkx/algorithms/__pycache__/d_separation.cpython-310.pyc,, +networkx/algorithms/__pycache__/dag.cpython-310.pyc,, +networkx/algorithms/__pycache__/distance_measures.cpython-310.pyc,, +networkx/algorithms/__pycache__/distance_regular.cpython-310.pyc,, +networkx/algorithms/__pycache__/dominance.cpython-310.pyc,, +networkx/algorithms/__pycache__/dominating.cpython-310.pyc,, +networkx/algorithms/__pycache__/efficiency_measures.cpython-310.pyc,, +networkx/algorithms/__pycache__/euler.cpython-310.pyc,, +networkx/algorithms/__pycache__/graph_hashing.cpython-310.pyc,, +networkx/algorithms/__pycache__/graphical.cpython-310.pyc,, +networkx/algorithms/__pycache__/hierarchy.cpython-310.pyc,, +networkx/algorithms/__pycache__/hybrid.cpython-310.pyc,, +networkx/algorithms/__pycache__/isolate.cpython-310.pyc,, +networkx/algorithms/__pycache__/link_prediction.cpython-310.pyc,, +networkx/algorithms/__pycache__/lowest_common_ancestors.cpython-310.pyc,, +networkx/algorithms/__pycache__/matching.cpython-310.pyc,, +networkx/algorithms/__pycache__/mis.cpython-310.pyc,, +networkx/algorithms/__pycache__/moral.cpython-310.pyc,, +networkx/algorithms/__pycache__/node_classification.cpython-310.pyc,, +networkx/algorithms/__pycache__/non_randomness.cpython-310.pyc,, +networkx/algorithms/__pycache__/planar_drawing.cpython-310.pyc,, +networkx/algorithms/__pycache__/planarity.cpython-310.pyc,, +networkx/algorithms/__pycache__/polynomials.cpython-310.pyc,, +networkx/algorithms/__pycache__/reciprocity.cpython-310.pyc,, +networkx/algorithms/__pycache__/regular.cpython-310.pyc,, +networkx/algorithms/__pycache__/richclub.cpython-310.pyc,, +networkx/algorithms/__pycache__/similarity.cpython-310.pyc,, +networkx/algorithms/__pycache__/simple_paths.cpython-310.pyc,, +networkx/algorithms/__pycache__/smallworld.cpython-310.pyc,, +networkx/algorithms/__pycache__/smetric.cpython-310.pyc,, +networkx/algorithms/__pycache__/sparsifiers.cpython-310.pyc,, +networkx/algorithms/__pycache__/structuralholes.cpython-310.pyc,, +networkx/algorithms/__pycache__/summarization.cpython-310.pyc,, +networkx/algorithms/__pycache__/swap.cpython-310.pyc,, +networkx/algorithms/__pycache__/threshold.cpython-310.pyc,, +networkx/algorithms/__pycache__/time_dependent.cpython-310.pyc,, +networkx/algorithms/__pycache__/tournament.cpython-310.pyc,, +networkx/algorithms/__pycache__/triads.cpython-310.pyc,, +networkx/algorithms/__pycache__/vitality.cpython-310.pyc,, +networkx/algorithms/__pycache__/voronoi.cpython-310.pyc,, +networkx/algorithms/__pycache__/walks.cpython-310.pyc,, +networkx/algorithms/__pycache__/wiener.cpython-310.pyc,, +networkx/algorithms/approximation/__init__.py,sha256=zf9NM64g-aZwEGqI5C0DpU5FML2GrkaaQsO6SW85atE,1177 +networkx/algorithms/approximation/__pycache__/__init__.cpython-310.pyc,, 
+networkx/algorithms/approximation/__pycache__/clique.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/clustering_coefficient.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/connectivity.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/distance_measures.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/dominating_set.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/kcomponents.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/matching.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/maxcut.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/ramsey.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/steinertree.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/traveling_salesman.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/treewidth.cpython-310.pyc,, +networkx/algorithms/approximation/__pycache__/vertex_cover.cpython-310.pyc,, +networkx/algorithms/approximation/clique.py,sha256=pkIg-cIgRxDHwGrQEwSsu_dca2ONdpwkw7heSALfOIg,7690 +networkx/algorithms/approximation/clustering_coefficient.py,sha256=SWpSLEhW3DJc1n2fHlSbJSGg3wdoJkN5Y4_tnntn0Ws,2164 +networkx/algorithms/approximation/connectivity.py,sha256=Zh0kx9Tc2fbcBgrJM33Ow8_v1rz4DVAR_d1sJbD2x4w,13119 +networkx/algorithms/approximation/distance_measures.py,sha256=UEkmKagNw9sj8kiUDdbAeYuzvZ31pgLMXqzliqMkG84,5805 +networkx/algorithms/approximation/dominating_set.py,sha256=HdwxBt82rilwaSzaCUXpgBvikv9qvCqcqnmpKiPNL40,4709 +networkx/algorithms/approximation/kcomponents.py,sha256=BJ1nNpQ9TbDqZTmSr0QZZa3i3uDAtiUK4CzPpMpJzyk,13286 +networkx/algorithms/approximation/matching.py,sha256=gwBVSGEgME38WLz_lSzt9ZKp-oWzXAo1ac1Kos98tB4,1174 +networkx/algorithms/approximation/maxcut.py,sha256=eTQZqsDQAAUaufni-aDJAY2UzIcajDhRMdj-AcqVkPs,4333 +networkx/algorithms/approximation/ramsey.py,sha256=UjY5DlkL7j6HagdcmF8T_w07JuSv5fylf9EI8BTmMDQ,1357 +networkx/algorithms/approximation/steinertree.py,sha256=GAHjv9KjzTGAERSOVHBBTgbd8g8mpz_ZifxtFtnTyGk,7414 +networkx/algorithms/approximation/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/approximation/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_approx_clust_coeff.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_clique.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_connectivity.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_distance_measures.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_dominating_set.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_kcomponents.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_matching.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_maxcut.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_ramsey.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_steinertree.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_traveling_salesman.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_treewidth.cpython-310.pyc,, +networkx/algorithms/approximation/tests/__pycache__/test_vertex_cover.cpython-310.pyc,, +networkx/algorithms/approximation/tests/test_approx_clust_coeff.py,sha256=PGOVEKf2BcJu1vvjZrgTlBBpwM8V6t7yCANjyS9nWF0,1171 
+networkx/algorithms/approximation/tests/test_clique.py,sha256=JZ_ja03aVU7vnZ42Joy1ze0vjdcm_CnDhD96Z4W_Dcc,3022 +networkx/algorithms/approximation/tests/test_connectivity.py,sha256=gDG6tsgP3ux7Dgu0x7r0nso7_yknIxicV42Gq0It5pc,5952 +networkx/algorithms/approximation/tests/test_distance_measures.py,sha256=GSyupA_jqSc_pLPSMnZFNcBgZc8-KFWgt6Q7uFegTqg,2024 +networkx/algorithms/approximation/tests/test_dominating_set.py,sha256=l4pBDY7pK7Fxw-S4tOlNcxf-j2j5GpHPJ9f4TrMs1sI,2686 +networkx/algorithms/approximation/tests/test_kcomponents.py,sha256=tTljP1FHzXrUwi-oBz5AQcibRw1NgR4N5UE0a2OrOUA,9346 +networkx/algorithms/approximation/tests/test_matching.py,sha256=nitZncaM0605kaIu1NO6_5TFV2--nohUCO46XTD_lnM,186 +networkx/algorithms/approximation/tests/test_maxcut.py,sha256=U6CDZFSLfYDII-1nX9XB7avSz10kTx88vNazJFoLQ1k,2804 +networkx/algorithms/approximation/tests/test_ramsey.py,sha256=h36Ol39csHbIoTDBxbxMgn4371iVUGZ3a2N6l7d56lI,1143 +networkx/algorithms/approximation/tests/test_steinertree.py,sha256=HhYvosChxB-kTu9XtKcxVxJndxZkOjVMG5tKfjRC9mM,8368 +networkx/algorithms/approximation/tests/test_traveling_salesman.py,sha256=nr4KrhJfVR4S7TpCc6QMTDUJYZn1YGmDwprTXoFtlZ4,30928 +networkx/algorithms/approximation/tests/test_treewidth.py,sha256=MWFFcmjO0QxM8FS8iXSCtfGnk6eqG2kFyv1u2qnSeUo,9096 +networkx/algorithms/approximation/tests/test_vertex_cover.py,sha256=FobHNhG9CAMeB_AOEprUs-7XQdPoc1YvfmXhozDZ8pM,1942 +networkx/algorithms/approximation/traveling_salesman.py,sha256=tGw-gV5yfo6eqg7t3K_c_L2ClATjnxAB0hFsEma8dh0,55917 +networkx/algorithms/approximation/treewidth.py,sha256=Yu944jTE9MODBo1QiZjxbAGmHiC5MXZZTNV1YrLfz9o,8216 +networkx/algorithms/approximation/vertex_cover.py,sha256=85QvMQ7qJjv7WUclpwvaOKF_g6TQjW7OvfWTQJr8fXQ,2802 +networkx/algorithms/assortativity/__init__.py,sha256=ov3HRRbeYB_6Qezvxp1OTl77GBpw-EWkWGUzgfT8G9c,294 +networkx/algorithms/assortativity/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/assortativity/__pycache__/connectivity.cpython-310.pyc,, +networkx/algorithms/assortativity/__pycache__/correlation.cpython-310.pyc,, +networkx/algorithms/assortativity/__pycache__/mixing.cpython-310.pyc,, +networkx/algorithms/assortativity/__pycache__/neighbor_degree.cpython-310.pyc,, +networkx/algorithms/assortativity/__pycache__/pairs.cpython-310.pyc,, +networkx/algorithms/assortativity/connectivity.py,sha256=-V0C5MTqtErl86N-gyrZ487MUyiG5x1QFEZKurOpIJA,4220 +networkx/algorithms/assortativity/correlation.py,sha256=gt5tpIWbtDCTIoi5FkkbZerwdKUSQ8trITiJ3A_qEok,8689 +networkx/algorithms/assortativity/mixing.py,sha256=hufm-t94FHlwLAqxJm-jcl_VygfVzMYtjn9PJ3qX8jQ,7585 +networkx/algorithms/assortativity/neighbor_degree.py,sha256=UMaQWKBkOZ0ZgC8xGt5fXEz8OL1rgwYjt2zKbKEqofI,5282 +networkx/algorithms/assortativity/pairs.py,sha256=IhFIelzVVKr0OHC1owPgdHasADbNuR89Y4DN0IeRVnM,3401 +networkx/algorithms/assortativity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/assortativity/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/base_test.cpython-310.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_connectivity.cpython-310.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_correlation.cpython-310.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_mixing.cpython-310.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_neighbor_degree.cpython-310.pyc,, +networkx/algorithms/assortativity/tests/__pycache__/test_pairs.cpython-310.pyc,, 
+networkx/algorithms/assortativity/tests/base_test.py,sha256=MNeQMLA3oBUCM8TSyNbBQ_uW0nDc1GEZYdNdUwePAm4,2651 +networkx/algorithms/assortativity/tests/test_connectivity.py,sha256=Js841GQLYTLWvc6xZhnyqj-JtyrnS0ska1TFYntxyXA,4978 +networkx/algorithms/assortativity/tests/test_correlation.py,sha256=1_D9GjLDnlT8Uy28lUn2fS1AHp2XBwiMpIl2OhRNDXk,5069 +networkx/algorithms/assortativity/tests/test_mixing.py,sha256=u-LIccNn-TeIAM766UtzUJQlY7NAbxF4EsUoKINzmlo,6820 +networkx/algorithms/assortativity/tests/test_neighbor_degree.py,sha256=ODP2M8jCaFr_l3ODwpwaz20-KqU2IFaEfJRBK53mpE8,3968 +networkx/algorithms/assortativity/tests/test_pairs.py,sha256=t05qP_-gfkbiR6aTLtE1owYl9otBSsuJcRkuZsa63UQ,3008 +networkx/algorithms/asteroidal.py,sha256=waDgHY2mHar0zqWMfaAF_3Wr8CwpdlNb3n6HhM6SkM4,5864 +networkx/algorithms/bipartite/__init__.py,sha256=NQtAEpZ0IkjGVwfUbOzD7eoPLwulb_iZfh7-aDnyPWo,3826 +networkx/algorithms/bipartite/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/basic.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/centrality.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/cluster.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/covering.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/edgelist.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/extendability.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/generators.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/matching.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/matrix.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/projection.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/redundancy.cpython-310.pyc,, +networkx/algorithms/bipartite/__pycache__/spectral.cpython-310.pyc,, +networkx/algorithms/bipartite/basic.py,sha256=WT65q-pQLc6SN5OFIrK8zDHC43tsy2j0xp2ImSCVZpg,8374 +networkx/algorithms/bipartite/centrality.py,sha256=G280bAqeyXyCmes5NpRqUv2Tc-EHWrMshJ3_f4uqV9U,9156 +networkx/algorithms/bipartite/cluster.py,sha256=P_Oh89liMvxf-V-FSk6xqEtz4PGjcx4WVqeNOFOB1fg,6937 +networkx/algorithms/bipartite/covering.py,sha256=Gyy5JahsHit9ycf1CX6YhpsBAY3uXh9vrcWBW1V20go,2164 +networkx/algorithms/bipartite/edgelist.py,sha256=tZbZrCGNaBMkrombWLkqY93D_h0gxoiEe2oSS74QBP4,11358 +networkx/algorithms/bipartite/extendability.py,sha256=RBOONtAYNoDQRA-L8dOrztICGPcr6Ckc7gdB3RNIUjY,3991 +networkx/algorithms/bipartite/generators.py,sha256=jslxxmjzkTsSOzheHK5YQaOycCHgMjIM1FfBpJ5ySjM,20423 +networkx/algorithms/bipartite/matching.py,sha256=NLWosugOWc5K1vSlhoeD-UYC7UbkLnZAXGxzaS4h7uI,21636 +networkx/algorithms/bipartite/matrix.py,sha256=CpgbFU-Kr8RSyE5vYm0od4xhxmFv2a62xss8K4BdxKw,6155 +networkx/algorithms/bipartite/projection.py,sha256=y0FeeEkqRHwrYus4WMtEbcFYC9QLlr_q7mYtg0HDBgo,17207 +networkx/algorithms/bipartite/redundancy.py,sha256=YGaWS3aT-6FTIdMt159H7IdRhWudOuCp8_sdeZKHpyc,3401 +networkx/algorithms/bipartite/spectral.py,sha256=xm7TuqlZQDHGmlFzrjPM-uRNAdRi-6KKayabnf_YG4M,1901 +networkx/algorithms/bipartite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/bipartite/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_basic.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_centrality.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_cluster.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_covering.cpython-310.pyc,, 
+networkx/algorithms/bipartite/tests/__pycache__/test_edgelist.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_extendability.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_generators.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_matching.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_matrix.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_project.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_redundancy.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/__pycache__/test_spectral_bipartivity.cpython-310.pyc,, +networkx/algorithms/bipartite/tests/test_basic.py,sha256=gzbtsQqPi85BznX5REdGBBJVyr9aH4nO06c3eEI4634,4291 +networkx/algorithms/bipartite/tests/test_centrality.py,sha256=PABPbrIyoAziEEQKXsZLl2jT36N8DZpNRzEO-jeu89Y,6362 +networkx/algorithms/bipartite/tests/test_cluster.py,sha256=O0VsPVt8vcY_E1FjjLJX2xaUbhVViI5MP6_gLTbEpos,2801 +networkx/algorithms/bipartite/tests/test_covering.py,sha256=EGVxYQsyLXE5yY5N5u6D4wZq2NcZe9OwlYpEuY6DF3o,1221 +networkx/algorithms/bipartite/tests/test_edgelist.py,sha256=nhA-SRF1iswNfrJpCNoDGjx3Se2Ukzs7r8TYhEldkeY,7764 +networkx/algorithms/bipartite/tests/test_extendability.py,sha256=XgPmg6bWiHAF1iQ75_r2NqUxExOQNZRUeYUPzlCa5-E,7043 +networkx/algorithms/bipartite/tests/test_generators.py,sha256=GLMThTKIfZ96NwTxIL0P0o0OAESZFfnySRkRjtKhao8,12794 +networkx/algorithms/bipartite/tests/test_matching.py,sha256=wFw095skCjW5YvQAnIie8mLacECVt0yUoeJFSj8ONAk,11972 +networkx/algorithms/bipartite/tests/test_matrix.py,sha256=1MymSi1dCUqAhTt82O2nBzjriNQtFRk6TxWGJ2FBW4k,3094 +networkx/algorithms/bipartite/tests/test_project.py,sha256=FBjkys3JYYzEG4aq_CsQrtm41edZibWI_uDAQ0b4wqM,15134 +networkx/algorithms/bipartite/tests/test_redundancy.py,sha256=ddjUzOQ0gkiWBLtVwVFYTJydaIdW3qAc4BCVscxj7-Q,919 +networkx/algorithms/bipartite/tests/test_spectral_bipartivity.py,sha256=1jGDgrIx3-TWOCNMSC4zxmZa7LHyMU69DXh3h12Bjag,2358 +networkx/algorithms/boundary.py,sha256=Ryns8peL17sBJcBUOKO26GIaTTUeFAfm6iTX2VaYzsI,5338 +networkx/algorithms/bridges.py,sha256=-SN3YpgEXWle52K3omTtLHkWvYN_6yjiZGORQc0FVYo,6087 +networkx/algorithms/broadcasting.py,sha256=eqqZJ7oDQVCl7P3-PLm-gthzSc-kWnF2D1Yv42GXoGk,4890 +networkx/algorithms/centrality/__init__.py,sha256=Er3YoYoj76UfY4P6I0L-0fCQkO7mMU0b3NLsTT2RGWI,558 +networkx/algorithms/centrality/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/betweenness.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/betweenness_subset.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/closeness.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/current_flow_betweenness.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/current_flow_betweenness_subset.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/current_flow_closeness.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/degree_alg.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/dispersion.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/eigenvector.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/flow_matrix.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/group.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/harmonic.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/katz.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/laplacian.cpython-310.pyc,, 
+networkx/algorithms/centrality/__pycache__/load.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/percolation.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/reaching.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/second_order.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/subgraph_alg.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/trophic.cpython-310.pyc,, +networkx/algorithms/centrality/__pycache__/voterank_alg.cpython-310.pyc,, +networkx/algorithms/centrality/betweenness.py,sha256=-dVKBg2CJOChZl2r_GakATkSGTQPvlSHky2oHv0fHdk,14382 +networkx/algorithms/centrality/betweenness_subset.py,sha256=iNUqXSGn07Wd_afFf4c8G2C4J8uT2UuJHJ9oGz_ZGBY,9335 +networkx/algorithms/centrality/closeness.py,sha256=MghxdMUR2s5JQER6339E7IX8Px1NPvyBNY-mP2pxL9c,10280 +networkx/algorithms/centrality/current_flow_betweenness.py,sha256=zRtaE6HycVWHz3u3DYs9XpP2ded7h63WJ-Ls71d52-M,11847 +networkx/algorithms/centrality/current_flow_betweenness_subset.py,sha256=xkCsv6noUVen4j8AWstjfIo09mkobG7VDawSrrYxzs4,8106 +networkx/algorithms/centrality/current_flow_closeness.py,sha256=2JJuPrZfDywjRxE-MAGqOS53HXhRb_LV19JRHzCcmE8,3326 +networkx/algorithms/centrality/degree_alg.py,sha256=PNvEQa7sZsTbbWjsE4f8NdpRoybPw83OuzAlqfQ5twk,3893 +networkx/algorithms/centrality/dispersion.py,sha256=M12L2KiVPrC2-SyCXMF0kvxLelgcmvXJkLT_cBHoCTw,3631 +networkx/algorithms/centrality/eigenvector.py,sha256=WTxH5lUPfzTjIcvKY8Jio0Vj_-8KT8HxWPzjLDy9pe0,12757 +networkx/algorithms/centrality/flow_matrix.py,sha256=TnGdY1mPvRprfI8IFMdpYQd4FsiP-6PoHhT4EQ5b0EM,3833 +networkx/algorithms/centrality/group.py,sha256=BdqFUfOpuubh-pN3qDDEQDz4II82xp71LBMiRITz1OI,27959 +networkx/algorithms/centrality/harmonic.py,sha256=OlklWOmsEXBxUzHpJePZFxE-yjszd8zEEeSsFQZAktk,2630 +networkx/algorithms/centrality/katz.py,sha256=x1Lg0VkQf3TzCRJEjTi--gQDb_UPSUFNXbW7XTyWl0k,11041 +networkx/algorithms/centrality/laplacian.py,sha256=1ceW7VkhT1QrKgU6lJIrbBBvVmLZoG_hUbxNh7OLXAI,5639 +networkx/algorithms/centrality/load.py,sha256=qz4ogD1_tMDDr2uXrIg7EQnEW2CIYpEOphKMk5n_R-c,6858 +networkx/algorithms/centrality/percolation.py,sha256=YJB8iYgbpjJ3EYK8pl26iSnjgfFsK31ufytRHnUTYYE,4419 +networkx/algorithms/centrality/reaching.py,sha256=aq9MQNBHEF_zJsxdNWAfuvztTwdrNfgMALCkoBOXu2Y,7025 +networkx/algorithms/centrality/second_order.py,sha256=4CTboP95B6gUtAtSKLfeeE4s9oq0_3hXsXczxL6c_g8,5012 +networkx/algorithms/centrality/subgraph_alg.py,sha256=8yhWUYqj0trBjH21ndYyxUQt6JcbPff7v9FNY8V7214,9512 +networkx/algorithms/centrality/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/centrality/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_betweenness_centrality_subset.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_closeness_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_current_flow_betweenness_centrality_subset.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_current_flow_closeness.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_degree_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_dispersion.cpython-310.pyc,, 
+networkx/algorithms/centrality/tests/__pycache__/test_eigenvector_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_group.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_harmonic_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_katz_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_laplacian_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_load_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_percolation_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_reaching.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_second_order_centrality.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_subgraph.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_trophic.cpython-310.pyc,, +networkx/algorithms/centrality/tests/__pycache__/test_voterank.cpython-310.pyc,, +networkx/algorithms/centrality/tests/test_betweenness_centrality.py,sha256=pKoPAP1hnQSgrOxYeW5-LdUiFDANiwTn_NdOdgccbo8,26795 +networkx/algorithms/centrality/tests/test_betweenness_centrality_subset.py,sha256=HrHMcgOL69Z6y679SbqZIjkQOnqrYSz24gt17AJ9q-o,12554 +networkx/algorithms/centrality/tests/test_closeness_centrality.py,sha256=XWZivyLjxYlF41U4ktUmvULC2PMvxKs2U6BHDXRZVdE,10209 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality.py,sha256=VOxx1A7iSGtdEbzJYea_sW_Hv0S71-oo1CVX7Rqd5RY,7870 +networkx/algorithms/centrality/tests/test_current_flow_betweenness_centrality_subset.py,sha256=JfRGgPuiF-vJu5fc2_pcJYREEboxcK_dmy-np39c4Aw,5839 +networkx/algorithms/centrality/tests/test_current_flow_closeness.py,sha256=vflQeoNKngrGUiRb3XNlm2X9wR4vKgMSW_sCyMUCQi8,1379 +networkx/algorithms/centrality/tests/test_degree_centrality.py,sha256=TxD7UBtezF4RCdbCAuTsSB5lcFOQZrGnLOuCMa0XWY0,4105 +networkx/algorithms/centrality/tests/test_dispersion.py,sha256=ROgl_5bGhcNXonNW3ylsvUcA0NCwynsQu_scic371Gw,1959 +networkx/algorithms/centrality/tests/test_eigenvector_centrality.py,sha256=MsHKkQX7oip4v0kF28K1RjtKqxSNVykiSjg8wT20YyE,4897 +networkx/algorithms/centrality/tests/test_group.py,sha256=YmWifoTgw2gSS5BnA9G2T_Voauk_WG6v90JrZEt-Kjk,8686 +networkx/algorithms/centrality/tests/test_harmonic_centrality.py,sha256=wYP0msmB5hh5OMIxPl9t0G4QSpG3Brxw98Kh9BrRoag,3658 +networkx/algorithms/centrality/tests/test_katz_centrality.py,sha256=JL0bZZsJe2MQFL6urXgY82wCAwucUvhjaShYZPxpL6U,11240 +networkx/algorithms/centrality/tests/test_laplacian_centrality.py,sha256=vY-NULtr_U_GxUMwfAZB-iccxIRTiqqUN4Q8HRNpzSo,5916 +networkx/algorithms/centrality/tests/test_load_centrality.py,sha256=Vv3zSW89iELN-8KNbUclmkhOe1LzKdF7U_w34nYovIo,11343 +networkx/algorithms/centrality/tests/test_percolation_centrality.py,sha256=ycQ1fvEZZcWAfqL11urT7yHiEP77usJDSG25OQiDM2s,2591 +networkx/algorithms/centrality/tests/test_reaching.py,sha256=sqQUPspoiWxs9tD77UwngBkMVFYjRzhayVxPqX9_XbY,4143 +networkx/algorithms/centrality/tests/test_second_order_centrality.py,sha256=ce0wQ4T33lu23wskzGUnBS7X4BSODlvAX1S5KxlLzOA,1999 +networkx/algorithms/centrality/tests/test_subgraph.py,sha256=vhE9Uh-_Hlk49k-ny6ORHCgqk7LWH8OHIYOEYM96uz0,3729 +networkx/algorithms/centrality/tests/test_trophic.py,sha256=AzV6rwcTa4b4tcenoKh95o6VF-z7w75l81ZOdhhi6yE,8705 +networkx/algorithms/centrality/tests/test_voterank.py,sha256=7Z9aQYKqEw_txBbWTz1FZWJzUmhjlMfDFSRIKHBdkOk,1692 
+networkx/algorithms/centrality/trophic.py,sha256=WyBOsNO_vLb4fcpL_u6XuOoalKbjukpzsZxyZDxWJIE,4678 +networkx/algorithms/centrality/voterank_alg.py,sha256=cw9ZaWf6svnbtgzNgX34tJDevXt9iUE2Zraf5TGHDjs,3230 +networkx/algorithms/chains.py,sha256=PPiSq5-GsT1Lsf8fwtGwGDVf1hhv5ZLariWtfzkBbAw,6968 +networkx/algorithms/chordal.py,sha256=w-EPJNn0H4G_b8fItmtzrorm0dMmiP7YE41yEzn0RgU,13410 +networkx/algorithms/clique.py,sha256=qlccLOScGphxo4gYKO7OhFD9JmIcf1yiV0CclQOKnPE,25871 +networkx/algorithms/cluster.py,sha256=x7dIotmBaBU3yaIzphjAyA2B-FHS_iiQ5nF-FeinQlU,20359 +networkx/algorithms/coloring/__init__.py,sha256=P1cmqrAjcaCdObkNZ1e6Hp__ZpxBAhQx0iIipOVW8jg,182 +networkx/algorithms/coloring/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/coloring/__pycache__/equitable_coloring.cpython-310.pyc,, +networkx/algorithms/coloring/__pycache__/greedy_coloring.cpython-310.pyc,, +networkx/algorithms/coloring/equitable_coloring.py,sha256=uDcza6PD9qbvwVPUX1MBZbopQdrAEKNk6DpCFkc02tU,16315 +networkx/algorithms/coloring/greedy_coloring.py,sha256=QHbXyBJ343vD2lY1ibXNYl-X8L-CMLkPOs3gNa7WEP0,20045 +networkx/algorithms/coloring/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/coloring/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/coloring/tests/__pycache__/test_coloring.cpython-310.pyc,, +networkx/algorithms/coloring/tests/test_coloring.py,sha256=jbynPtdFLaJHKt77AR24gJT4B5C8h6pKQ90oyxepOYM,23699 +networkx/algorithms/communicability_alg.py,sha256=yRn0n_CyeSbNihMipwXG3aksli0ehlsYYHD_dULQ7U4,4544 +networkx/algorithms/community/__init__.py,sha256=0U-iJWeQttY972nar-qbwFFImqEOETQnKoBOlXHDpsE,1178 +networkx/algorithms/community/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/asyn_fluid.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/centrality.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/community_utils.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/divisive.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/kclique.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/kernighan_lin.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/label_propagation.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/louvain.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/lukes.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/modularity_max.cpython-310.pyc,, +networkx/algorithms/community/__pycache__/quality.cpython-310.pyc,, +networkx/algorithms/community/asyn_fluid.py,sha256=0ktsoOa4JKBKiuE3wmGDcBSUgPlFdGvzNheqINtWKbk,5935 +networkx/algorithms/community/centrality.py,sha256=Yyv5kyf1hf_L7iQ_ZbG8_FAkP638Sc_3N4tCSoB6J1w,6635 +networkx/algorithms/community/community_utils.py,sha256=YPPninS6Xf7L5ZH9tLYxaFYMDVyMED6IsfJqXCq5tHA,907 +networkx/algorithms/community/divisive.py,sha256=gH4DFsHLXSP8rJFn5Ied_vk0gV8T8k520D2w9t5nhrA,6416 +networkx/algorithms/community/kclique.py,sha256=DTr9iUT_XWv0S3Y79KQl6OXefjztNMc9SAHWhdFOxcU,2460 +networkx/algorithms/community/kernighan_lin.py,sha256=vPU8Mbpk7_NscMC-gorNoXhsQjkOhgK2YiKOo-u6DvY,4349 +networkx/algorithms/community/label_propagation.py,sha256=5s-_nRrZqT5hNv_kNOLh7pC_RYJR4R6ztBJaC6h-yuQ,11877 +networkx/algorithms/community/louvain.py,sha256=zh5h16hRWzgTv9IUqWiiJKFntZhQbB_EHNYIGViwPas,15365 +networkx/algorithms/community/lukes.py,sha256=gzqnup95RR2UzUiPpIt8qkepzZ9dCWqHGQSVPIJDMx8,8115 +networkx/algorithms/community/modularity_max.py,sha256=gzyZrGHNMtTZyqpLFcJHxgzzIsar1m5DktScODoUngk,18082 
+networkx/algorithms/community/quality.py,sha256=dVIkV-CFKdAou0WjgIDmfhnpIIqReRaeL4odg39XAYk,11939 +networkx/algorithms/community/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/community/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_asyn_fluid.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_centrality.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_divisive.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_kclique.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_kernighan_lin.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_label_propagation.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_louvain.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_lukes.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_modularity_max.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_quality.cpython-310.pyc,, +networkx/algorithms/community/tests/__pycache__/test_utils.cpython-310.pyc,, +networkx/algorithms/community/tests/test_asyn_fluid.py,sha256=UzAMxJzhN74qUinehR7B1rhU_vsigJ7-cRvcE6jdKyc,3332 +networkx/algorithms/community/tests/test_centrality.py,sha256=ADU1mFn7yl9kTtQjOkfPtjpmkBR_i_6hwbVkWh5qZmw,2931 +networkx/algorithms/community/tests/test_divisive.py,sha256=-Ee40OR-mPDReTngTEhbpx4_uLtNI7cqFkt8cZT9t5Y,3441 +networkx/algorithms/community/tests/test_kclique.py,sha256=iA0SBqwbDfaD2u7KM6ccs6LfgAQY_xxrnW05UIT_tFA,2413 +networkx/algorithms/community/tests/test_kernighan_lin.py,sha256=s8bK53Y1a87zvlZ1AJE-QJ2vItnbscSOlHQSrMpetGI,2709 +networkx/algorithms/community/tests/test_label_propagation.py,sha256=IHidFEv7MI781zsdk7XT848rLvLwDk2wBK1FjL-CRv4,7985 +networkx/algorithms/community/tests/test_louvain.py,sha256=TwW1nlSKWGJeIKr9QOJ8xGehSY6R0Nz01xsnFqzt0Oo,8071 +networkx/algorithms/community/tests/test_lukes.py,sha256=f_JU-EzY6PwXEkPN8kk5_3NVg6phlX0nrj1f57M49lk,3961 +networkx/algorithms/community/tests/test_modularity_max.py,sha256=XYyPuDkxL4CYFwnpTdU_qD4GydpqgiRAIJO3CHQN_m4,10617 +networkx/algorithms/community/tests/test_quality.py,sha256=_kbOlYD1mpPduNQU1wJx58we6Z8CbmQ8wsDwOqTE4hg,5274 +networkx/algorithms/community/tests/test_utils.py,sha256=r_YEdGUaGZo8B16FxzocmkgpRrWgqyN7ehvx_qFiYu4,706 +networkx/algorithms/components/__init__.py,sha256=Dt74KZWp_cJ_j0lL5hd_S50_hia5DKcC2SjuRnubr6M,173 +networkx/algorithms/components/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/components/__pycache__/attracting.cpython-310.pyc,, +networkx/algorithms/components/__pycache__/biconnected.cpython-310.pyc,, +networkx/algorithms/components/__pycache__/connected.cpython-310.pyc,, +networkx/algorithms/components/__pycache__/semiconnected.cpython-310.pyc,, +networkx/algorithms/components/__pycache__/strongly_connected.cpython-310.pyc,, +networkx/algorithms/components/__pycache__/weakly_connected.cpython-310.pyc,, +networkx/algorithms/components/attracting.py,sha256=LZmBD3GnsP8k9CWeW98TqYxrGv0z4XOcFiWa08--gHw,2711 +networkx/algorithms/components/biconnected.py,sha256=TPx3H63C_a4Aur1n8pkaz7veiMO0oOOkrWapGMZ-YPs,12781 +networkx/algorithms/components/connected.py,sha256=JtInjl-bmIPZoZ2qe3TZCZyNWRR8y3QsGl44DH7Lh7E,4433 +networkx/algorithms/components/semiconnected.py,sha256=Lu0tzwL_TI_Sv-xAKubu5WtUXlcDaRix9ggDIBPc8M0,2029 
+networkx/algorithms/components/strongly_connected.py,sha256=43XUcIJ-6iLDwd5qlJ9FWp7s-D70h57dhNKVB6XSPlY,11744 +networkx/algorithms/components/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/components/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/components/tests/__pycache__/test_attracting.cpython-310.pyc,, +networkx/algorithms/components/tests/__pycache__/test_biconnected.cpython-310.pyc,, +networkx/algorithms/components/tests/__pycache__/test_connected.cpython-310.pyc,, +networkx/algorithms/components/tests/__pycache__/test_semiconnected.cpython-310.pyc,, +networkx/algorithms/components/tests/__pycache__/test_strongly_connected.cpython-310.pyc,, +networkx/algorithms/components/tests/__pycache__/test_weakly_connected.cpython-310.pyc,, +networkx/algorithms/components/tests/test_attracting.py,sha256=b3N3ZR9E5gLSQWGgaqhcRfRs4KBW6GnnkVYeAjdxC_o,2243 +networkx/algorithms/components/tests/test_biconnected.py,sha256=N-J-dgBgI77ytYUUrXjduLxtDydH7jS-af98fyPBkYc,6036 +networkx/algorithms/components/tests/test_connected.py,sha256=g4KIvumz-lFNpZi8C70vhWfUsp2X2_UNn7p7R92EOPU,3987 +networkx/algorithms/components/tests/test_semiconnected.py,sha256=q860lIxZF5M2JmDwwdzy-SGSXnrillOefMx23GcJpw0,1792 +networkx/algorithms/components/tests/test_strongly_connected.py,sha256=GBuM8ie_etN6IyhnsZxqR5rnsgU2hejKlsKYwkBGx-4,6479 +networkx/algorithms/components/tests/test_weakly_connected.py,sha256=_eUx7226dxme_K2WNmvSIwZXQlKNoCuglWOOC3kFUW4,3083 +networkx/algorithms/components/weakly_connected.py,sha256=yHd0iyjdbT3_VaCTWx9dybeFQEnas2raa1MpQZEchOI,4344 +networkx/algorithms/connectivity/__init__.py,sha256=VuUXTkagxX-tHjgmeYJ3K4Eq_luK6kSpv1nZwiwGFd8,281 +networkx/algorithms/connectivity/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/connectivity/__pycache__/connectivity.cpython-310.pyc,, +networkx/algorithms/connectivity/__pycache__/cuts.cpython-310.pyc,, +networkx/algorithms/connectivity/__pycache__/disjoint_paths.cpython-310.pyc,, +networkx/algorithms/connectivity/__pycache__/edge_augmentation.cpython-310.pyc,, +networkx/algorithms/connectivity/__pycache__/edge_kcomponents.cpython-310.pyc,, +networkx/algorithms/connectivity/__pycache__/kcomponents.cpython-310.pyc,, +networkx/algorithms/connectivity/__pycache__/kcutsets.cpython-310.pyc,, +networkx/algorithms/connectivity/__pycache__/stoerwagner.cpython-310.pyc,, +networkx/algorithms/connectivity/__pycache__/utils.cpython-310.pyc,, +networkx/algorithms/connectivity/connectivity.py,sha256=jubbwh9Ech4ft4UdZB0F7nhNGgTCVoeOJF4DZhLohBQ,29687 +networkx/algorithms/connectivity/cuts.py,sha256=p0jdkx6YN7SAoM5LFmn7wBFxmEdYjLR5b7mjm7vPFzA,23014 +networkx/algorithms/connectivity/disjoint_paths.py,sha256=0adHh-ZWZFWuTCJNjCk08i5UgmepcAvjr2QK8D8L_Ic,14648 +networkx/algorithms/connectivity/edge_augmentation.py,sha256=rnoH1M1T1aZIdGnddd10uBrd4XVTrJ-mYZFBTIdSbKw,44060 +networkx/algorithms/connectivity/edge_kcomponents.py,sha256=jPaG6-mx96-HRIF8PjQXV4QtClYJMWPysI6PT-vNoIc,20893 +networkx/algorithms/connectivity/kcomponents.py,sha256=ba9EytfQH5f75h5ljaFmepdXXBnQXajuUBqVVVvD1sk,8170 +networkx/algorithms/connectivity/kcutsets.py,sha256=b1MOmaycITjWno4axzIG5QLlijLfJInCu3mzXTReD4w,9370 +networkx/algorithms/connectivity/stoerwagner.py,sha256=HfO_S3-f7uIGRlxAFaWnNYHpYwLVFc8QOgSdOoQqTIs,5430 +networkx/algorithms/connectivity/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/connectivity/tests/__pycache__/__init__.cpython-310.pyc,, 
+networkx/algorithms/connectivity/tests/__pycache__/test_connectivity.cpython-310.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_cuts.cpython-310.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_disjoint_paths.cpython-310.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_edge_augmentation.cpython-310.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_edge_kcomponents.cpython-310.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_kcomponents.cpython-310.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_kcutsets.cpython-310.pyc,, +networkx/algorithms/connectivity/tests/__pycache__/test_stoer_wagner.cpython-310.pyc,, +networkx/algorithms/connectivity/tests/test_connectivity.py,sha256=eSmsi8uQk6MI591JgtSu2elIusb08bmSZS0h9gxb76I,15027 +networkx/algorithms/connectivity/tests/test_cuts.py,sha256=4F8seWb-sPDDjjVMkh14gst5UQa5f-zDkCsZIdJjVzo,10353 +networkx/algorithms/connectivity/tests/test_disjoint_paths.py,sha256=NLHReLoXSKoA6KPBNRbjF84ktg5PEaaktIj2AII3SDY,8392 +networkx/algorithms/connectivity/tests/test_edge_augmentation.py,sha256=d3ymFHyY2G4cpy1Y6wu4ze339qfF2LRp2HmGAIVjnMM,15731 +networkx/algorithms/connectivity/tests/test_edge_kcomponents.py,sha256=CZ26Dy91WOUqhw1X73mqLGX-WHWzBBIeBCgrp6KK4Zo,16453 +networkx/algorithms/connectivity/tests/test_kcomponents.py,sha256=ohoSX8GACeszRZdzTiNuWXSFitfU9DzP0hqllS2gvMU,8554 +networkx/algorithms/connectivity/tests/test_kcutsets.py,sha256=sVKjwQt3FUqtnlY2xuHn6VGY9rvUkYoVp7v5fK-6aJw,8610 +networkx/algorithms/connectivity/tests/test_stoer_wagner.py,sha256=A291C30_t2CI1erPCqN1W0DoAj3zqNA8fThPIj4Rku0,3011 +networkx/algorithms/connectivity/utils.py,sha256=ynrrShW4QvxxOEsN_iBAgNPkcoMFZ7KBE4oetvT-cNc,3216 +networkx/algorithms/core.py,sha256=oIomkMWZvCCN_1t1keGcXpjUcnf3n4kM5t_dMwGU1UU,19183 +networkx/algorithms/covering.py,sha256=IEMNtzDkHTdN9wYn1Dw3yMN4920Qc4EY4PssMAhMtAU,5295 +networkx/algorithms/cuts.py,sha256=kOGGQ-ZGdRoiZDRhXj68Epa7tgJzI5826nJbESdX0d4,9992 +networkx/algorithms/cycles.py,sha256=ufAiKuQQup5p7PUdZtHiDsnyOEFUGWTAg1dgnchrZpw,43174 +networkx/algorithms/d_separation.py,sha256=3O_5RIWziPQ5xwRn-yAjH28xrkSaVIVbCFpw7K2Pa2A,27283 +networkx/algorithms/dag.py,sha256=I2HmgASMd83O3m5VtOTdXKQPO_IK2Ra_p96qHxJnEvY,39428 +networkx/algorithms/distance_measures.py,sha256=6A5bB4KtKdgJ31AGVqqOCLMAyhHMW3Qkn8PBxYzHxHg,31830 +networkx/algorithms/distance_regular.py,sha256=-1QCGLy7OPoNuV2bYJDY4jVot-0LGMobBQ0DubjbhGI,7053 +networkx/algorithms/dominance.py,sha256=Ox3nSj6dbIgFQxU1HlhUA4pB7hgHsXtV8aoo_5Tjesg,3430 +networkx/algorithms/dominating.py,sha256=m81MIzNsxuY4f8GRDqin6av-CZTD_7dVmO4Ce-fKhjA,2668 +networkx/algorithms/efficiency_measures.py,sha256=e_FdO7BvOBkf1HfbRKgdjaMtai67ZcRc2sFFVHWXadk,4798 +networkx/algorithms/euler.py,sha256=YWsDcDV8nN92iSAc6X_cg1XkeXGwuVPFmVRlC5A2hIc,14204 +networkx/algorithms/flow/__init__.py,sha256=rVtMUy6dViPLewjDRntmn15QF0bQwiDdQbZZx9j7Drc,341 +networkx/algorithms/flow/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/boykovkolmogorov.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/capacityscaling.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/dinitz_alg.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/edmondskarp.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/gomory_hu.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/maxflow.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/mincost.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/networksimplex.cpython-310.pyc,, 
+networkx/algorithms/flow/__pycache__/preflowpush.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/shortestaugmentingpath.cpython-310.pyc,, +networkx/algorithms/flow/__pycache__/utils.cpython-310.pyc,, +networkx/algorithms/flow/boykovkolmogorov.py,sha256=jIzy7CgUG710E2XKGpA7N2yyM3hXmGK5RdrVbo7qFt8,13333 +networkx/algorithms/flow/capacityscaling.py,sha256=8rng2qO5kawNSxq2S8BNlUMmdvNSoC6R8ekiBGU8LxU,14469 +networkx/algorithms/flow/dinitz_alg.py,sha256=SEFw8s-KlRPvpZ9Rzhilgw66oKrWyKyw48ugsOUBQJg,8340 +networkx/algorithms/flow/edmondskarp.py,sha256=PEIwLftevS2VYHaTzzZMSOLPy7QSBPsWPedjx1lR6Cs,8056 +networkx/algorithms/flow/gomory_hu.py,sha256=R9W5V-LfQirf9ysckI5ty5anq-UyaMwasnoqcCrRaXc,6344 +networkx/algorithms/flow/maxflow.py,sha256=PXmPSNzXgxli6x769mNYCAbC4KwaT_znwvz0IxjCcyw,22759 +networkx/algorithms/flow/mincost.py,sha256=GzMYInS4QcNe0yImGrVXJ0bRd7t5TSSMa9jSeenIoOk,12853 +networkx/algorithms/flow/networksimplex.py,sha256=32uetoZWj-_7KPO2OJputP0FpTrsQ_qJxntC8XxIVr0,25185 +networkx/algorithms/flow/preflowpush.py,sha256=CUKZ0-7X9l7P7qH_2n2Immbf8mFm8vocH2SY0tIwjGo,15721 +networkx/algorithms/flow/shortestaugmentingpath.py,sha256=gXXdkY3nH4d0hXVn0P2-kzfC3DHcuCdrudFdxetflKI,10372 +networkx/algorithms/flow/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/flow/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_gomory_hu.cpython-310.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_maxflow.cpython-310.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_maxflow_large_graph.cpython-310.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_mincost.cpython-310.pyc,, +networkx/algorithms/flow/tests/__pycache__/test_networksimplex.cpython-310.pyc,, +networkx/algorithms/flow/tests/gl1.gpickle.bz2,sha256=z4-BzrXqruFiGqYLiS2D5ZamFz9vZRc1m2ef89qhsPg,44623 +networkx/algorithms/flow/tests/gw1.gpickle.bz2,sha256=b3nw6Q-kxR7HkWXxWWPh7YlHdXbga8qmeuYiwmBBGTE,42248 +networkx/algorithms/flow/tests/netgen-2.gpickle.bz2,sha256=OxfmbN7ajtuNHexyYmx38fZd1GdeP3bcL8T9hKoDjjA,18972 +networkx/algorithms/flow/tests/test_gomory_hu.py,sha256=aWtbI3AHofIK6LDJnmj9UH1QOfulXsi5NyB7bNyV2Vw,4471 +networkx/algorithms/flow/tests/test_maxflow.py,sha256=YRgkrdRj6NMHOXio2Zgr7-ErEzCbq7Z0w90azNffCC4,18727 +networkx/algorithms/flow/tests/test_maxflow_large_graph.py,sha256=fMweTQ3MzsZWYI-ul2dGR8OfGQeo8df2fLeCleHqxZw,4623 +networkx/algorithms/flow/tests/test_mincost.py,sha256=n4fFLDwDLy7Tau-_ey1CoxZwKhFjk28GLGJjCyxhClk,17816 +networkx/algorithms/flow/tests/test_networksimplex.py,sha256=bsVxlvHAD0K7aDevCcVaa9uRNNsWAevw6yUKlj2T8No,12103 +networkx/algorithms/flow/tests/wlm3.gpickle.bz2,sha256=zKy6Hg-_swvsNh8OSOyIyZnTR0_Npd35O9RErOF8-g4,88132 +networkx/algorithms/flow/utils.py,sha256=bCeiFAiyFe4-ptkCopo_PnQKF9xY5M8Br87hJT3fRWQ,6084 +networkx/algorithms/graph_hashing.py,sha256=duPonk1Bv9Lc8-bWY5wSkbkyi7yJuCJvR_eGiyRHxGg,12427 +networkx/algorithms/graphical.py,sha256=dt24mdupuU-6P3wwKWm2u0Mj5Wf3HntfJK9yNMJPKgY,15831 +networkx/algorithms/hierarchy.py,sha256=T8el6aWy8_cH74IHyhw3L4chNN2U_VIzTYE0IbCCJRQ,1545 +networkx/algorithms/hybrid.py,sha256=UV47QxghspuRhMCqQRjm-5Dt8maRgoGjqZ_XSt0oTcU,6208 +networkx/algorithms/isolate.py,sha256=g2YxL61zK9mGaT6mMxOe2qjnliUC5DVeH-VSYS8XYG4,2337 +networkx/algorithms/isomorphism/__init__.py,sha256=gPRQ-_X6xN2lJZPQNw86IVj4NemGmbQYTejf5yJ32N4,406 +networkx/algorithms/isomorphism/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/isomorphism/__pycache__/ismags.cpython-310.pyc,, 
+networkx/algorithms/isomorphism/__pycache__/isomorph.cpython-310.pyc,, +networkx/algorithms/isomorphism/__pycache__/isomorphvf2.cpython-310.pyc,, +networkx/algorithms/isomorphism/__pycache__/matchhelpers.cpython-310.pyc,, +networkx/algorithms/isomorphism/__pycache__/temporalisomorphvf2.cpython-310.pyc,, +networkx/algorithms/isomorphism/__pycache__/tree_isomorphism.cpython-310.pyc,, +networkx/algorithms/isomorphism/__pycache__/vf2pp.cpython-310.pyc,, +networkx/algorithms/isomorphism/__pycache__/vf2userfunc.cpython-310.pyc,, +networkx/algorithms/isomorphism/ismags.py,sha256=TpZP5xDxLITCGOk8DT4EBVaWDbbjzEUT5ZOCDNGAho0,43239 +networkx/algorithms/isomorphism/isomorph.py,sha256=CzMKwPMlCBpGIbO8X8SzCg_cdWUMlHFUkUmnepcGfNg,7113 +networkx/algorithms/isomorphism/isomorphvf2.py,sha256=qAK4eCY_8adSnF6v5Yv6oRYuBluapgdlmCgJ7_MJKTk,40980 +networkx/algorithms/isomorphism/matchhelpers.py,sha256=iDPnAjTBCWNtt8J45TWZJ-oo0mHpRg2L7d2D-7fqYGk,10883 +networkx/algorithms/isomorphism/temporalisomorphvf2.py,sha256=yX-vOLLjV9_jycbpEy0MQbw8kfbA6vQieemlQz7OxSk,10888 +networkx/algorithms/isomorphism/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/isomorphism/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_ismags.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphism.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_isomorphvf2.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_match_helpers.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_temporalisomorphvf2.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_tree_isomorphism.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_vf2pp_helpers.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/__pycache__/test_vf2userfunc.cpython-310.pyc,, +networkx/algorithms/isomorphism/tests/iso_r01_s80.A99,sha256=hKzMtYLUR8Oqp9pmJR6RwG7qo31aNPZcnXy4KHDGhqU,1442 +networkx/algorithms/isomorphism/tests/iso_r01_s80.B99,sha256=AHx_W2xG4JEcz1xKoN5TwCHVE6-UO2PiMByynkd4TPE,1442 +networkx/algorithms/isomorphism/tests/si2_b06_m200.A99,sha256=NVnPFA52amNl3qM55G1V9eL9ZlP9NwugBlPf-zekTFU,310 +networkx/algorithms/isomorphism/tests/si2_b06_m200.B99,sha256=-clIDp05LFNRHA2BghhGTeyuXDqBBqA9XpEzpB7Ku7M,1602 +networkx/algorithms/isomorphism/tests/test_ismags.py,sha256=2sOkbB7Aejnq4zDx9BhJyfavf5DLiKJaUPusb3fhGRk,10585 +networkx/algorithms/isomorphism/tests/test_isomorphism.py,sha256=kF-o4dTjB7Ad0NOHnUGoiOCCNr3MWSmJm_YBc-Wvhgk,2022 +networkx/algorithms/isomorphism/tests/test_isomorphvf2.py,sha256=s4yO4cHJk5qIpRemnSzD1MJEeSJPNpZcOU6LeWVhGXI,11751 +networkx/algorithms/isomorphism/tests/test_match_helpers.py,sha256=uuTcvjgf2LPqSQzzECPIh0dezw8-a1IN0u42u8TxwAw,2483 +networkx/algorithms/isomorphism/tests/test_temporalisomorphvf2.py,sha256=DZy2zAt74jiTAM-jGK5H9aGRn1ZsMgQl9K5UNsu178Y,7346 +networkx/algorithms/isomorphism/tests/test_tree_isomorphism.py,sha256=0-7waJjupg8AWfQDqrcsJVOgTXk7HePr5kt87MgnPtM,7412 +networkx/algorithms/isomorphism/tests/test_vf2pp.py,sha256=65RkN1mPWLoxirE7SlIvfaKMJk80b_ZwWG6HTJtlkPg,49924 +networkx/algorithms/isomorphism/tests/test_vf2pp_helpers.py,sha256=HnXcdy2LTBFX423nIdJ8CbwmfkHFmzf1XNa8-xld5jk,90125 +networkx/algorithms/isomorphism/tests/test_vf2userfunc.py,sha256=yby-vt4sYxc1uzlnD-iETREbojgNkpQGbLkrPER_Sss,6629 
+networkx/algorithms/isomorphism/tree_isomorphism.py,sha256=fj1cUspSojUVwmAdWKGzXEHqOawUNJgzfO9QjCEnPLs,9454 +networkx/algorithms/isomorphism/vf2pp.py,sha256=4CykBmrp8RGZl5ZSdfW0jhsSdkK1EvdqoALVn1u4OF0,36375 +networkx/algorithms/isomorphism/vf2userfunc.py,sha256=VVTNWEzHnRaZrjtinBnkStRNsvC9FVvivXWs-pqG6LM,7475 +networkx/algorithms/link_analysis/__init__.py,sha256=UkcgTDdzsIu-jsJ4jBwP8sF2CsRPC1YcZZT-q5Wlj3I,118 +networkx/algorithms/link_analysis/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/link_analysis/__pycache__/hits_alg.cpython-310.pyc,, +networkx/algorithms/link_analysis/__pycache__/pagerank_alg.cpython-310.pyc,, +networkx/algorithms/link_analysis/hits_alg.py,sha256=XlapG3wm5CHJ7Fg5spDo0vPnsgm_e05_2WQjmwyAK98,10421 +networkx/algorithms/link_analysis/pagerank_alg.py,sha256=MyKsd4GvcF1wfB-K_BJBHtUoYB-as4o_bxuhIm0CtN4,17191 +networkx/algorithms/link_analysis/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/link_analysis/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/link_analysis/tests/__pycache__/test_hits.cpython-310.pyc,, +networkx/algorithms/link_analysis/tests/__pycache__/test_pagerank.cpython-310.pyc,, +networkx/algorithms/link_analysis/tests/test_hits.py,sha256=QjSZZmrj3rBLNVpKOIHUvJNYM7OJ1b-yjiaglyVzNyw,2547 +networkx/algorithms/link_analysis/tests/test_pagerank.py,sha256=f5QWokpJEDf3d9SLfCcVKpsdEBMRi0vJgRTz8Oa1DuE,7534 +networkx/algorithms/link_prediction.py,sha256=KLmkEggJ6ltLUXPuisRiab7eH7pEsy3UaaxxIsT7crY,22256 +networkx/algorithms/lowest_common_ancestors.py,sha256=7BgNpBFP9PFkDQceeh7jf9NFYuLCboT0YReIsXLkItg,9197 +networkx/algorithms/matching.py,sha256=rPn3P_2xDAXwM8IqOrZ3asHx4jEJ9vv_83AK2ZBMsAQ,44549 +networkx/algorithms/minors/__init__.py,sha256=ceeKdsZ6U1H40ED-KmtVGkbADxeWMTVG07Ja8P7N_Pg,587 +networkx/algorithms/minors/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/minors/__pycache__/contraction.cpython-310.pyc,, +networkx/algorithms/minors/contraction.py,sha256=qIFmtFQislTZfNQU3IPzQeoegecw0ST5sOJdO_GUi4E,22869 +networkx/algorithms/minors/tests/__pycache__/test_contraction.cpython-310.pyc,, +networkx/algorithms/minors/tests/test_contraction.py,sha256=rob7wHlt3xoXYxpcXQOwm7zP0TLyRqWV1JxsZlE8kfo,14212 +networkx/algorithms/mis.py,sha256=kcmWs7F6Fxx0r4cRiasyWRU2UjVCIMEuW2xSIgcWux4,2343 +networkx/algorithms/moral.py,sha256=z5lp42k4kqYk7t_FfszVj5KAC7BxXe6Adik3T2qvA6o,1535 +networkx/algorithms/node_classification.py,sha256=FZItO-HeKsugbGGKU3crYVRyB2VXODjNc3jh_8VSvvY,6469 +networkx/algorithms/non_randomness.py,sha256=PpDcPqY5sjnxr4yO6VhS7nzx3THLNiKqE8oORU-4wPA,2904 +networkx/algorithms/operators/__init__.py,sha256=dJ3xOXvHxSzzM3-YcfvjGTJ_ndxULF1TybkIRzUS87Y,201 +networkx/algorithms/operators/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/operators/__pycache__/all.cpython-310.pyc,, +networkx/algorithms/operators/__pycache__/binary.cpython-310.pyc,, +networkx/algorithms/operators/__pycache__/product.cpython-310.pyc,, +networkx/algorithms/operators/__pycache__/unary.cpython-310.pyc,, +networkx/algorithms/operators/all.py,sha256=dAlalaC4KR4hXsRole255cAsDb4mXNN5p2hCYB2sWvw,9652 +networkx/algorithms/operators/binary.py,sha256=dVfq_I9MMRm1c-Xo26q_sDQ8sOgYEd2cY6qaOH7FUkA,12935 +networkx/algorithms/operators/product.py,sha256=RAMTwu8MxWjaD5SZO-VhPy0Dk1EmK7pXDrID5XuK1R4,19603 +networkx/algorithms/operators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/operators/tests/__pycache__/__init__.cpython-310.pyc,, 
+networkx/algorithms/operators/tests/__pycache__/test_all.cpython-310.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_binary.cpython-310.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_product.cpython-310.pyc,, +networkx/algorithms/operators/tests/__pycache__/test_unary.cpython-310.pyc,, +networkx/algorithms/operators/tests/test_all.py,sha256=Pqjv9QiA0875Yl9D5o6c5Ml0t4KHpH2a5jbpAoZQXFc,8250 +networkx/algorithms/operators/tests/test_binary.py,sha256=CvZpOXgXuHuzx7cB1f1ggfoOXqXQHelY5_Sp5Mr_6HE,12909 +networkx/algorithms/operators/tests/test_product.py,sha256=igu1MnYf0S02nXfTELaNIy9OGwrJbZ2C7DIbJcfH0a4,15156 +networkx/algorithms/operators/tests/test_unary.py,sha256=UZdzbt5GI9hnflEizUWXihGqBWmSFJDkzjwVv6wziQE,1415 +networkx/algorithms/operators/unary.py,sha256=LN5mU30rkKW7Wo5l6trQarrxwq1O0iHjHi81ABdxtTw,1794 +networkx/algorithms/planar_drawing.py,sha256=AXuoT3aFgEtCeMnAaUsRqjxCABdNYZ8Oo9sGOKBQto0,16254 +networkx/algorithms/planarity.py,sha256=PhIhnecPna-J_v7taoj-Ie175XWayVfcuMDHkj2bWLc,47249 +networkx/algorithms/polynomials.py,sha256=9nHrqjz7K1nlUbUV7bGao3Liru9dYH_KQt_EfVSVrBg,11278 +networkx/algorithms/reciprocity.py,sha256=qrHCIynxabOQXU7uK8olOxHI5Q7HacH3MUU9vDDnFMc,2854 +networkx/algorithms/regular.py,sha256=fqSEop3OtABqXti4b46sy_ti3RyJCsuU2Ww8QBFvIXA,6793 +networkx/algorithms/richclub.py,sha256=kARzso3M6wnUcAJo2g8ga_ZtigL2czDNzeUDzBtRfqo,4892 +networkx/algorithms/shortest_paths/__init__.py,sha256=Rmxtsje-mPdQyeYhE8TP2NId-iZEOu4eAsWhVRm2Xqk,285 +networkx/algorithms/shortest_paths/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/shortest_paths/__pycache__/astar.cpython-310.pyc,, +networkx/algorithms/shortest_paths/__pycache__/dense.cpython-310.pyc,, +networkx/algorithms/shortest_paths/__pycache__/generic.cpython-310.pyc,, +networkx/algorithms/shortest_paths/__pycache__/unweighted.cpython-310.pyc,, +networkx/algorithms/shortest_paths/__pycache__/weighted.cpython-310.pyc,, +networkx/algorithms/shortest_paths/astar.py,sha256=W4zpRie8oxxQci_4v3wmCjMATbDZRPSIaXiSDTw6kLM,8943 +networkx/algorithms/shortest_paths/dense.py,sha256=854OX-Y9ezrJuAR_VNyCT6DXeG_b9IrvkJHwiMDEvvY,8167 +networkx/algorithms/shortest_paths/generic.py,sha256=dl3JJ-ByQheVSnkNNgcMDw0toFv-s1A-1EGwJ8hdkPY,25734 +networkx/algorithms/shortest_paths/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/shortest_paths/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_astar.cpython-310.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_dense.cpython-310.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_dense_numpy.cpython-310.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_generic.cpython-310.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_unweighted.cpython-310.pyc,, +networkx/algorithms/shortest_paths/tests/__pycache__/test_weighted.cpython-310.pyc,, +networkx/algorithms/shortest_paths/tests/test_astar.py,sha256=G9hrEo2U9c_kzaRTAXYbS1TpcJgF_uqj9249K2qbjAY,8941 +networkx/algorithms/shortest_paths/tests/test_dense.py,sha256=ievl4gu3Exl_31hp4OKcsAGPb3g3_xFUM4t3NnvrG_A,6747 +networkx/algorithms/shortest_paths/tests/test_dense_numpy.py,sha256=BNwXCe2wgNPE8o35-shPsFj8l19c_QG6Ye8tkIGphf8,2300 +networkx/algorithms/shortest_paths/tests/test_generic.py,sha256=oJBKCLIsMA1KTo8q-oG9JQmaxysc7_QSgbBqMImh23c,18456 +networkx/algorithms/shortest_paths/tests/test_unweighted.py,sha256=fjpDkp38DmW8R2qpLRwRjcbYZp4an0f0yIq40XsFKJ8,5899 
+networkx/algorithms/shortest_paths/tests/test_weighted.py,sha256=dmzFBYN3QEDZoun7RAtSe_spsGSbvkDiJSgUf9e-1K8,35038 +networkx/algorithms/shortest_paths/unweighted.py,sha256=pnRA7LPMl-vC2lELBHOU1kebRLtgFFsNazYoP1TNpkM,15617 +networkx/algorithms/shortest_paths/weighted.py,sha256=ZT1IFJvDrO4inPci8iVXTteEJBvv9D48lRQ2oEN2elc,82473 +networkx/algorithms/similarity.py,sha256=gPXADLC4HL48YJyzu_LFK9O_WQikZyIxLN_qmyC1h8c,60963 +networkx/algorithms/simple_paths.py,sha256=0kWc6qusbdXHklJyDxh6dj2-tuU9NRJuiO9DJN1vveg,29610 +networkx/algorithms/smallworld.py,sha256=ZQtiv1sBCTTyNUgOSH01gr9lTGXQ42WaotqjcsRWjjI,13564 +networkx/algorithms/smetric.py,sha256=NGq0LyAMOa2A4yuNTigrgaR7HDI8wThqNu0tK68hGs8,1937 +networkx/algorithms/sparsifiers.py,sha256=tL35uuBi8Wz52xAO3nScrzXn0HSZR2SRpDS6q7pLpe0,10047 +networkx/algorithms/structuralholes.py,sha256=CS89P45_m1JGFGnSGA-FlC2xnt0BYq3O5ky1zkjYEDI,9342 +networkx/algorithms/summarization.py,sha256=ARCsA8WC3SPgLwngVvlVsff5XfmuHAWIfscrnWtPQzY,23250 +networkx/algorithms/swap.py,sha256=9OEp1YlPz29AC22O6K51xVmqaYmT1chx0kCVpLg6ddM,14745 +networkx/algorithms/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_asteroidal.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_boundary.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_bridges.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_broadcasting.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_chains.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_chordal.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_clique.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_cluster.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_communicability.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_core.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_covering.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_cuts.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_cycles.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_d_separation.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_dag.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_distance_measures.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_distance_regular.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_dominance.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_dominating.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_efficiency.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_euler.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_graph_hashing.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_graphical.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_hierarchy.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_hybrid.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_isolate.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_link_prediction.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_lowest_common_ancestors.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_matching.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_max_weight_clique.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_mis.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_moral.cpython-310.pyc,, 
+networkx/algorithms/tests/__pycache__/test_node_classification.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_non_randomness.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_planar_drawing.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_planarity.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_polynomials.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_reciprocity.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_regular.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_richclub.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_similarity.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_simple_paths.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_smallworld.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_smetric.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_sparsifiers.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_structuralholes.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_summarization.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_swap.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_threshold.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_time_dependent.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_tournament.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_triads.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_vitality.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_voronoi.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_walks.cpython-310.pyc,, +networkx/algorithms/tests/__pycache__/test_wiener.cpython-310.pyc,, +networkx/algorithms/tests/test_asteroidal.py,sha256=DnWI5_jnaaZMxtG44XD0K690HZs8ez7HU_9dSR-p6eA,502 +networkx/algorithms/tests/test_boundary.py,sha256=1OSJh32FYFhAVYB5zqxhZGEXZLS0HPp9kvfHZvWmD3o,6227 +networkx/algorithms/tests/test_bridges.py,sha256=jSCguECho0GNHnu0vpRh1twyfGP6tWFcaYL1rgvc8mU,4026 +networkx/algorithms/tests/test_broadcasting.py,sha256=HGllt9dPTZPE7okbmXdxkL_gr8wVqgbANj1AxeRNb5I,2020 +networkx/algorithms/tests/test_chains.py,sha256=SofaAxDEJDf1gt5sIGVC_O8vT9YcTc8Jq1vfnwVPhkM,4363 +networkx/algorithms/tests/test_chordal.py,sha256=DPdNPY7KtqCsCwYVb4xQfnIm-z35dUJIWxNHtAiQLAQ,4438 +networkx/algorithms/tests/test_clique.py,sha256=FPIF2f8NLODsz-k_qrHt7DolClV_VdNWSh68oe8-ygI,9413 +networkx/algorithms/tests/test_cluster.py,sha256=CzYPJm4QY5SL-amMNh2ItPgQ-FjePPG9EBfIKOZHp6s,15883 +networkx/algorithms/tests/test_communicability.py,sha256=4KK9wU9gAUqHAAAyHwAKpq2dV9g415s_X0qd7Tt83gU,2938 +networkx/algorithms/tests/test_core.py,sha256=CF7YPX3F2pUtBu2sp4ZEAGRldaBkdgr1ufk6UkrETuA,9555 +networkx/algorithms/tests/test_covering.py,sha256=EeBjQ5mxcVctgavqXZ255T8ryFocuxjxdVpIxVUNFvw,2718 +networkx/algorithms/tests/test_cuts.py,sha256=2Ir5xyIG4cTC4Dgg1cceLXaEFiOCJ60ZTDDn33vz0Ns,5377 +networkx/algorithms/tests/test_cycles.py,sha256=dr3IWIiJuhqDi3s8dcSv1PQn-nBh3I3RGHn6jOcRuos,34416 +networkx/algorithms/tests/test_d_separation.py,sha256=ZypzMVDpBZo_4qBlieFlj3RVU6vh7tejEZGlu7qcQbc,10929 +networkx/algorithms/tests/test_dag.py,sha256=oNkUci8iRFdxES3sD9HQe3oJBIGyyPfprWlQAtNfvYU,27930 +networkx/algorithms/tests/test_distance_measures.py,sha256=8d51TtvvlM1m4RDUsaXlrxOV1CnK35HGQVtMS0myxNU,25522 +networkx/algorithms/tests/test_distance_regular.py,sha256=w27OTUtAI0VQv7cikkOdJg4bo4q7xTNIVE8nbU_x7b8,2915 
+networkx/algorithms/tests/test_dominance.py,sha256=nPqRGSF1GEvUR16ryo-dOql6fLdTvzBmYk8Y3ML-ONc,9373 +networkx/algorithms/tests/test_dominating.py,sha256=hyta7ln6BbHaGlpEUla6jVzh2PRuSjvujLSGXrmwZbc,1228 +networkx/algorithms/tests/test_efficiency.py,sha256=QKWMvyjCG1Byt-oNp7Rz_qxnVeT77Zk27lrzI1qH0mA,1894 +networkx/algorithms/tests/test_euler.py,sha256=L4L1ljHVxQxjQQludO2r6k3UZU7WAY_N6WYUjFx1fEk,11209 +networkx/algorithms/tests/test_graph_hashing.py,sha256=MqRwsNbyRWUy94V7UuDqEREuHxFTSn7-d0HzwSDI2As,24534 +networkx/algorithms/tests/test_graphical.py,sha256=uhFjvs04odxABToY4IRig_CaUTpAC3SfZRu1p1T7FwY,5366 +networkx/algorithms/tests/test_hierarchy.py,sha256=g3-0pNfzRo-RDW1BsiLXxyi2LwWIJukXx2i4JCpN2fg,941 +networkx/algorithms/tests/test_hybrid.py,sha256=kQLzaMoqZcKFaJ3D7PKbY2O-FX59XDZ1pN5un8My-tk,720 +networkx/algorithms/tests/test_isolate.py,sha256=LyR0YYHJDH5vppQzGzGiJK-aaIV17_Jmla8dMf93olg,555 +networkx/algorithms/tests/test_link_prediction.py,sha256=Jah4vOGDYcWaPSl_iG-0fOXnhu5o8f6wcfakRmWuX7I,20004 +networkx/algorithms/tests/test_lowest_common_ancestors.py,sha256=GvhYCQMnVYD9LHPCNFgWMAUmOV8V5gko0fe05zi1JwU,13153 +networkx/algorithms/tests/test_matching.py,sha256=jhehNkApE5RuMPtbjWNeHn0tPqhVz65mL7QakfRA3Vw,20174 +networkx/algorithms/tests/test_max_weight_clique.py,sha256=JWGZpbQfUaCklCGI170Gfpp3b5ICYwY7RH_DQ1mYQbc,6741 +networkx/algorithms/tests/test_mis.py,sha256=Z2tKoqbs-AFPzEBDYO7S8U-F7usLfZJ2l6j2DpZUts4,1865 +networkx/algorithms/tests/test_moral.py,sha256=15PZgkx7O9aXQB1npQ2JNqBBkEqPPP2RfeZzKqY-GNU,452 +networkx/algorithms/tests/test_node_classification.py,sha256=NgJJKUHH1GoD1GE3F4QRYBLM3fUo_En3RNtZvhqCjlg,4663 +networkx/algorithms/tests/test_non_randomness.py,sha256=-8s-fJLYRxVNp7QpaMe5Dxrxi0kvewY78d4ja-nXNBk,782 +networkx/algorithms/tests/test_planar_drawing.py,sha256=NN55y2cs9IdZYwUsG-RbI07aGSMx5gp5vnmGLC2vopo,8765 +networkx/algorithms/tests/test_planarity.py,sha256=rrIGX28JoG_DqINsuY4TSdDloxnz4dkCd3xeRo9Svqs,16386 +networkx/algorithms/tests/test_polynomials.py,sha256=baI0Kua1pRngRC6Scm5gRRwi1bl0iET5_Xxo3AZTP3A,1983 +networkx/algorithms/tests/test_reciprocity.py,sha256=X_PXWFOTzuEcyMWpRdwEJfm8lJOfNE_1rb9AAybf4is,1296 +networkx/algorithms/tests/test_regular.py,sha256=5KGvwhixanEigI0KgeUJ1hWPw7YRGZgNbrMkKcndd5M,2626 +networkx/algorithms/tests/test_richclub.py,sha256=ql_j69gIoph8d6oD2tzDqu3b-uW884nmEJZQmWANR6k,3965 +networkx/algorithms/tests/test_similarity.py,sha256=BV5f4DiSQHPsXkSosf29idxGQ_wLiTwEsiHtgDOLLw4,33189 +networkx/algorithms/tests/test_simple_paths.py,sha256=e750_1aTMNJ2NIHo83xfLDkK9UzmlYkTu9Rp54eDI2c,24839 +networkx/algorithms/tests/test_smallworld.py,sha256=rfgNCRU6YF55f8sCuA5WmX6MmhDci89Tb4jaz4ALjcQ,2405 +networkx/algorithms/tests/test_smetric.py,sha256=wihpgjZS4PaajOuE72RiDEbBWpQcoKPSAfjoAezuRxg,980 +networkx/algorithms/tests/test_sparsifiers.py,sha256=A12V4ljWxvXaSFJ73mHSFK2YNO-k8ax6Me4yEWTsI4s,4043 +networkx/algorithms/tests/test_structuralholes.py,sha256=mxlgheGz-4HbnWm328pZynzIBJYIukXDp9AxmHqrsLE,5540 +networkx/algorithms/tests/test_summarization.py,sha256=cGAep6r-v141uAdsPF9r8YTuT-nO7L7puOqPPv339wo,21313 +networkx/algorithms/tests/test_swap.py,sha256=rrvKwedIuqq7Q2Ell-yYZKoYyq6IBkrG4Y-GOc2QFrQ,6121 +networkx/algorithms/tests/test_threshold.py,sha256=RF_SM5tdMGJfEHETO19mFicnt69UIlvVeuCwI7rxb0M,9751 +networkx/algorithms/tests/test_time_dependent.py,sha256=NmuV2kDo4nh2MeN0hwcJf0QSDtqMD0dfSeeKSsYBtQ8,13342 +networkx/algorithms/tests/test_tournament.py,sha256=xxmLb9Lrmjkh9tKmyv2yYJrhB2PHWh-Bq71M-d1NjQo,4158 
+networkx/algorithms/tests/test_triads.py,sha256=anSuYt1ZmV0_aGtSPLl5YxEQZHOuo0QndNADUdZKqdY,9383 +networkx/algorithms/tests/test_vitality.py,sha256=p5lPWCtVMtbvxDw6TJUaf8vpb0zKPoz5pND722xiypQ,1380 +networkx/algorithms/tests/test_voronoi.py,sha256=M4B6JtkJUw56ULEWRs1kyVEUsroNrnb5FBq9OioAyHM,3477 +networkx/algorithms/tests/test_walks.py,sha256=X8cb-YvGHiiqbMEXuKMSdTAb9WtVtbHjIESNSqpJTmU,1499 +networkx/algorithms/tests/test_wiener.py,sha256=k9ld7wdPq5knS6cjo0hja8aWL-cdxYKGRpDU0z3cvNI,3209 +networkx/algorithms/threshold.py,sha256=JYMM4wrtdQpzw-_L9VYSr3ACVLI8Iu_1p-uK6dWdQ_w,31149 +networkx/algorithms/time_dependent.py,sha256=PAeJ7Yt8kUqbDgvBaz_ZfUFZg-w-vf1gPC0HO6go_TI,5762 +networkx/algorithms/tournament.py,sha256=khYrCbO5GfnRWYtCrEhmSA7ldGnUQC45RQxh6cJmhuk,11766 +networkx/algorithms/traversal/__init__.py,sha256=YtFrfNjciqTOI6jGePQaJ01tRSEQXTHqTGGNhDEDb_8,142 +networkx/algorithms/traversal/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/traversal/__pycache__/beamsearch.cpython-310.pyc,, +networkx/algorithms/traversal/__pycache__/breadth_first_search.cpython-310.pyc,, +networkx/algorithms/traversal/__pycache__/depth_first_search.cpython-310.pyc,, +networkx/algorithms/traversal/__pycache__/edgebfs.cpython-310.pyc,, +networkx/algorithms/traversal/__pycache__/edgedfs.cpython-310.pyc,, +networkx/algorithms/traversal/beamsearch.py,sha256=dTsm_57uhq2NlScvJ-0j6lkQpS9wtwRd4tS2YU6_yzI,3472 +networkx/algorithms/traversal/breadth_first_search.py,sha256=1vo0kFbEDMkyVDDRMxiQ4TIIO5NjpnKbOu7dcFh_WGc,19241 +networkx/algorithms/traversal/depth_first_search.py,sha256=X6IvDAjIrtrNvCu3n8arkx3bqCeEaaUodCkXlGP9sa0,16794 +networkx/algorithms/traversal/edgebfs.py,sha256=zKqwV4s_mxa3Y4nTYaT9I_UiUAYLGk8ru34oCpnaatM,6243 +networkx/algorithms/traversal/edgedfs.py,sha256=g-aIZ7mEc88bI0FETnsL-50cW0lHSdNP7rz25j1oBIo,5956 +networkx/algorithms/traversal/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/traversal/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_beamsearch.cpython-310.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_bfs.cpython-310.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_dfs.cpython-310.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_edgebfs.cpython-310.pyc,, +networkx/algorithms/traversal/tests/__pycache__/test_edgedfs.cpython-310.pyc,, +networkx/algorithms/traversal/tests/test_beamsearch.py,sha256=cGXGwJU_9jxNtzU8EsOX6TyoA1rKM_CfczESIlG_K8c,899 +networkx/algorithms/traversal/tests/test_bfs.py,sha256=fC6HUKzd5Jd9LerxgODpfvCRE15BU5PbMzEaMLoXPZs,6796 +networkx/algorithms/traversal/tests/test_dfs.py,sha256=EqLV_C-3frQ89C-SD0jtHvWEankNfPXm6M76JDdenq0,10604 +networkx/algorithms/traversal/tests/test_edgebfs.py,sha256=8oplCu0fct3QipT0JB0-292EA2aOm8zWlMkPedfe6iY,4702 +networkx/algorithms/traversal/tests/test_edgedfs.py,sha256=HGmC3GUYSn9XLMHQpdefdE6g-Uh3KqbmgEEXBcckdYc,4775 +networkx/algorithms/tree/__init__.py,sha256=wm_FjX3G7hqJfyNmeEaJsRjZI-8Kkv0Nb5jAmQNXzSc,149 +networkx/algorithms/tree/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/tree/__pycache__/branchings.cpython-310.pyc,, +networkx/algorithms/tree/__pycache__/coding.cpython-310.pyc,, +networkx/algorithms/tree/__pycache__/decomposition.cpython-310.pyc,, +networkx/algorithms/tree/__pycache__/mst.cpython-310.pyc,, +networkx/algorithms/tree/__pycache__/operations.cpython-310.pyc,, +networkx/algorithms/tree/__pycache__/recognition.cpython-310.pyc,, 
+networkx/algorithms/tree/branchings.py,sha256=xXTh3csPHe8su4hFeIHLNN_W1_Tg6cowZR5OjlgQr30,56350 +networkx/algorithms/tree/coding.py,sha256=RWBC-UzKt86RZ78jBuS-4qJkYPLB4oy-hgZGWcqjR_Q,13463 +networkx/algorithms/tree/decomposition.py,sha256=lY_rqx9JxnLEkp1wiAv0mX62PGPwGQ6SW4Jp48o8aiw,3071 +networkx/algorithms/tree/mst.py,sha256=t58j4OhKQvd-SMT5iraZs3p3qy-5xL-E8gwZtsRKB3Y,45918 +networkx/algorithms/tree/operations.py,sha256=WQRgFl8sYImezZHLHwwnp9cqrwHYh2-aiUy1VUUMzW8,4726 +networkx/algorithms/tree/recognition.py,sha256=bYnaDN0ZaIWTgq0tbPEHAcdxQBWZpDvWypZarBbA334,7569 +networkx/algorithms/tree/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/algorithms/tree/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_branchings.cpython-310.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_coding.cpython-310.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_decomposition.cpython-310.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_mst.cpython-310.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_operations.cpython-310.pyc,, +networkx/algorithms/tree/tests/__pycache__/test_recognition.cpython-310.pyc,, +networkx/algorithms/tree/tests/test_branchings.py,sha256=kcC49jNRncSPNAQhgHRYIAu207nScB-jObPq1WmaQeM,18999 +networkx/algorithms/tree/tests/test_coding.py,sha256=f3A5dvfkWImC6Jp2qkuw2Sz3whOsabnaOfu6Eh9r65I,3954 +networkx/algorithms/tree/tests/test_decomposition.py,sha256=vnl_xoQzi1LnlZL25vXOZWwvaWmon3-x222OKt4eDqE,1871 +networkx/algorithms/tree/tests/test_mst.py,sha256=_Nz7vPuQFetiPNZIHEZFuoFXPhVyr0wYbDdW_xtcMNQ,29544 +networkx/algorithms/tree/tests/test_operations.py,sha256=ybU96kROTVJRTyjLG7JSJjYlPxaWmYjUVJqbXV5VGGI,1961 +networkx/algorithms/tree/tests/test_recognition.py,sha256=qeMEIvg-j2MqaU-TNIQhCcXxao8vTBy0wjpU7jr2iw8,4521 +networkx/algorithms/triads.py,sha256=Rtxi5G9YialRFPZ9IR-z0nuppcSyCulJVQmQany6oac,16852 +networkx/algorithms/vitality.py,sha256=D4DfvQ7Egise4wMwRVQB-vBvYPovVbgh9kFEOOhgkU0,2335 +networkx/algorithms/voronoi.py,sha256=aNt5XTrD8bEkaey1Tp88FopoDOXLWVN_RovT66U9EAM,3182 +networkx/algorithms/walks.py,sha256=_aCy0RmrK2i2vgpqG3ZZcg-MsK6j65DNFzUHz0hmXe8,2428 +networkx/algorithms/wiener.py,sha256=el5cD8ZO-wEjtcjMcgY6bSENIPd6JXEMtHLKb-z9h44,7640 +networkx/classes/__init__.py,sha256=Q9oONJrnTFs874SGpwcbV_kyJTDcrLI69GFt99MiE6I,364 +networkx/classes/__pycache__/__init__.cpython-310.pyc,, +networkx/classes/__pycache__/coreviews.cpython-310.pyc,, +networkx/classes/__pycache__/digraph.cpython-310.pyc,, +networkx/classes/__pycache__/filters.cpython-310.pyc,, +networkx/classes/__pycache__/function.cpython-310.pyc,, +networkx/classes/__pycache__/graph.cpython-310.pyc,, +networkx/classes/__pycache__/graphviews.cpython-310.pyc,, +networkx/classes/__pycache__/multidigraph.cpython-310.pyc,, +networkx/classes/__pycache__/multigraph.cpython-310.pyc,, +networkx/classes/__pycache__/reportviews.cpython-310.pyc,, +networkx/classes/coreviews.py,sha256=Qu6kupOVVBXKOUFBkXOh-4YQEuPL6d6VPyJEaZC5beE,12414 +networkx/classes/digraph.py,sha256=_8gYUKVISvFRxIifD8raxE_PoEtUxL3GrqepC2NM9kI,47496 +networkx/classes/filters.py,sha256=yVoFHVQ7O9895SzVbOgPMNTGH3vWg5apEueDHUXTi_k,2501 +networkx/classes/function.py,sha256=5Ir24Zoa7woLMDB00ux__mFm6I7x6FCH9QT6C0BmYFg,36945 +networkx/classes/graph.py,sha256=gV2zvjNakmTdLjjh3RgUaT64hFigJpEtzxGCCW9Udkw,70794 +networkx/classes/graphviews.py,sha256=xmSeUXcSPamE0GSr8VNm7NnyjDl2e34fJHs1AXUgNsc,8588 +networkx/classes/multidigraph.py,sha256=v5dSRzS8c1pWdrgf0ONaCnRCRLjUWLvNuT7sID0o-Bk,36350 
+networkx/classes/multigraph.py,sha256=OFkma1MfIb5BgiIbn-USmUWs80rw_Esf4DtPSbS_saE,47247 +networkx/classes/reportviews.py,sha256=KLZ9v26LsxR17iKmcLhvLLbc3fLMnWcW1yu0UlntT3s,45859 +networkx/classes/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/classes/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/classes/tests/__pycache__/dispatch_interface.cpython-310.pyc,, +networkx/classes/tests/__pycache__/historical_tests.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_coreviews.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_digraph.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_digraph_historical.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_filters.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_function.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_graph.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_graph_historical.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_graphviews.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_multidigraph.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_multigraph.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_reportviews.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_special.cpython-310.pyc,, +networkx/classes/tests/__pycache__/test_subgraphviews.cpython-310.pyc,, +networkx/classes/tests/dispatch_interface.py,sha256=eYzuKdfBifMDxcrxHeOLGZgkVghVZFr9SyW7QRdYcqI,6687 +networkx/classes/tests/historical_tests.py,sha256=3lbZKaRvv8uodIEzSbBJDguTPpO2MhqBqh-Pk1soZBM,16173 +networkx/classes/tests/test_coreviews.py,sha256=qzdozzWK8vLag-CAUqrXAM2CZZwMFN5vMu6Tdrwdf-E,12128 +networkx/classes/tests/test_digraph.py,sha256=uw0FuEu3y_YI-PSGuQCRytFpXLF7Eye2fqLJaKbXkBc,12283 +networkx/classes/tests/test_digraph_historical.py,sha256=s9FpuIP81zIbGCiMfiDqB3OxqWU2p3GwWdhpGIOjD5Y,3683 +networkx/classes/tests/test_filters.py,sha256=fBLig8z548gsBBlQw6VJdGZb4IcqJj7_0mi2Fd2ncEM,5851 +networkx/classes/tests/test_function.py,sha256=b1XQeKUn9N-TbIHH92iFbvuz023CBfwFE6SBburJHBw,25842 +networkx/classes/tests/test_graph.py,sha256=77t7pk1Pmz-txewyD2Dv19Vva6vWpWCtJSPtFx-EY_Y,30913 +networkx/classes/tests/test_graph_historical.py,sha256=-jf961vQCuQLyly0ju50q9dbzWG5m2OAs9H6IVS670c,273 +networkx/classes/tests/test_graphviews.py,sha256=i4x3ii8--PPg_pK4YA8aMR1axUQCdXZYpzmB05iEAOg,11466 +networkx/classes/tests/test_multidigraph.py,sha256=ryTKegCoYixXbAqOn3mIt9vSMb5666Dv-pfMkXEjoUE,16342 +networkx/classes/tests/test_multigraph.py,sha256=0vFQO3RCJaBpzXvnQzdWa_qYLHNo_I9DICYhPZJNUMk,18777 +networkx/classes/tests/test_reportviews.py,sha256=2bTAKetjhHvlca48GN-qYY1V_Rnz16wBi9UT7DeAcXo,41633 +networkx/classes/tests/test_special.py,sha256=IJsmqCS9LrTDoZ11KPmo-UOI7xEskL7NyduEJNPMNqs,4103 +networkx/classes/tests/test_subgraphviews.py,sha256=1dcJHq3F00LyoFSu6CTFPqS7DFIkWK1PyQu4QvJh5ko,13223 +networkx/conftest.py,sha256=ULCWJLM55y0zfP8maAi9rq-DnkFc7XCe5h_Y9QHI5yo,8819 +networkx/convert.py,sha256=YWmnP_BD6EH6BlqtARtQ1Zclv_pzLe_Ks4gp--CE9nY,16027 +networkx/convert_matrix.py,sha256=K3134LniasiPU0a9QqBnwMLMYP4kuHcM06zw2A9jQHE,41409 +networkx/drawing/__init__.py,sha256=rnTFNzLc4fis1hTAEpnWTC80neAR88-llVQ-LObN-i4,160 +networkx/drawing/__pycache__/__init__.cpython-310.pyc,, +networkx/drawing/__pycache__/layout.cpython-310.pyc,, +networkx/drawing/__pycache__/nx_agraph.cpython-310.pyc,, +networkx/drawing/__pycache__/nx_latex.cpython-310.pyc,, +networkx/drawing/__pycache__/nx_pydot.cpython-310.pyc,, 
+networkx/drawing/__pycache__/nx_pylab.cpython-310.pyc,, +networkx/drawing/layout.py,sha256=K0875d7Bp5Odi7tQh8sKRGKwWe-MLIgkm3pbzjoSmuw,40753 +networkx/drawing/nx_agraph.py,sha256=gn84HupOV7aD3VDlM2aIdJKubqWFCYdAz5L7Bsbv8fk,14004 +networkx/drawing/nx_latex.py,sha256=_WWVtu_dmBTZBlbzXzOUxhpgpduw6Zri9m-d2JAb7ys,24804 +networkx/drawing/nx_pydot.py,sha256=Kacu6HIuFMXggWOm-JwFSntJ5m3upxmvs9IIqxuc4KQ,12357 +networkx/drawing/nx_pylab.py,sha256=UopFL5Ct7SUtOT6me1pXd6NqhxbnLFv6rXRa_uqeafY,61617 +networkx/drawing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/drawing/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/drawing/tests/__pycache__/test_agraph.cpython-310.pyc,, +networkx/drawing/tests/__pycache__/test_latex.cpython-310.pyc,, +networkx/drawing/tests/__pycache__/test_layout.cpython-310.pyc,, +networkx/drawing/tests/__pycache__/test_pydot.cpython-310.pyc,, +networkx/drawing/tests/__pycache__/test_pylab.cpython-310.pyc,, +networkx/drawing/tests/baseline/test_house_with_colors.png,sha256=FQi9pIRFwjq4gvgB8cDdBHL5euQUJFw6sQlABf2kRVo,21918 +networkx/drawing/tests/test_agraph.py,sha256=NvisvEgqusj1bY0CUXezpwnXrSxubpAe6GHTk2bAc1A,8788 +networkx/drawing/tests/test_latex.py,sha256=_Wng73kMltC-_sUoxdo2uBL2bkEc7HMqkKhwo9ZDJGA,8710 +networkx/drawing/tests/test_layout.py,sha256=C37dhOVDxUIabaWUyWp7q22EpdvmRC9-AjatAjomSyc,19801 +networkx/drawing/tests/test_pydot.py,sha256=ytsZ2iiAqXs8KETF2e19WPwQMMDtDLCurVS7s3L7TJg,6107 +networkx/drawing/tests/test_pylab.py,sha256=zKiVm4dQHIIHsnVnqLay2Tc4wF2UbB90-XRexpXnfpU,30412 +networkx/exception.py,sha256=5v8tPTpYcuu3OFgSitgC8-wMUGNwfgxZog2gsBNeRPk,3537 +networkx/generators/__init__.py,sha256=3p86E_yn54BQYlDleuN9APncLNrPsX4F3IoyMeKJOtU,1365 +networkx/generators/__pycache__/__init__.cpython-310.pyc,, +networkx/generators/__pycache__/atlas.cpython-310.pyc,, +networkx/generators/__pycache__/classic.cpython-310.pyc,, +networkx/generators/__pycache__/cographs.cpython-310.pyc,, +networkx/generators/__pycache__/community.cpython-310.pyc,, +networkx/generators/__pycache__/degree_seq.cpython-310.pyc,, +networkx/generators/__pycache__/directed.cpython-310.pyc,, +networkx/generators/__pycache__/duplication.cpython-310.pyc,, +networkx/generators/__pycache__/ego.cpython-310.pyc,, +networkx/generators/__pycache__/expanders.cpython-310.pyc,, +networkx/generators/__pycache__/geometric.cpython-310.pyc,, +networkx/generators/__pycache__/harary_graph.cpython-310.pyc,, +networkx/generators/__pycache__/internet_as_graphs.cpython-310.pyc,, +networkx/generators/__pycache__/intersection.cpython-310.pyc,, +networkx/generators/__pycache__/interval_graph.cpython-310.pyc,, +networkx/generators/__pycache__/joint_degree_seq.cpython-310.pyc,, +networkx/generators/__pycache__/lattice.cpython-310.pyc,, +networkx/generators/__pycache__/line.cpython-310.pyc,, +networkx/generators/__pycache__/mycielski.cpython-310.pyc,, +networkx/generators/__pycache__/nonisomorphic_trees.cpython-310.pyc,, +networkx/generators/__pycache__/random_clustered.cpython-310.pyc,, +networkx/generators/__pycache__/random_graphs.cpython-310.pyc,, +networkx/generators/__pycache__/small.cpython-310.pyc,, +networkx/generators/__pycache__/social.cpython-310.pyc,, +networkx/generators/__pycache__/spectral_graph_forge.cpython-310.pyc,, +networkx/generators/__pycache__/stochastic.cpython-310.pyc,, +networkx/generators/__pycache__/sudoku.cpython-310.pyc,, +networkx/generators/__pycache__/time_series.cpython-310.pyc,, +networkx/generators/__pycache__/trees.cpython-310.pyc,, 
+networkx/generators/__pycache__/triads.cpython-310.pyc,, +networkx/generators/atlas.dat.gz,sha256=c_xBbfAWSSNgd1HLdZ9K6B3rX2VQvyW-Wcht47dH5B0,8887 +networkx/generators/atlas.py,sha256=CL33scmzOqboyrume3Auxi_kxmpPoPWhlTIi5hOOUbc,5605 +networkx/generators/classic.py,sha256=GO6aoVotzUl4UwO9owgVUajYueq5tusMwXBDTZHK8fI,31576 +networkx/generators/cographs.py,sha256=BWbTZ7uW2LTsexUx6iDiwNAzq7iiyRu8FB4B74d0NZU,1890 +networkx/generators/community.py,sha256=7si2tkO75yBYyUHsJuKrF3D3-hT7BSdU9YHPOSfMvCY,34910 +networkx/generators/degree_seq.py,sha256=kuU3wy2J5UEkWzkXyyxxFxHvs7HMcBWiKZSS6TLPYZ4,30174 +networkx/generators/directed.py,sha256=Vcg0zeWFS2-F99bFmhXj4mzlCy_yoBuuqjnSx5I-Dco,15696 +networkx/generators/duplication.py,sha256=ltUICmWTEN0eYLN-TPx6x8mJSPgmIysoTIUaKeTPxI4,5051 +networkx/generators/ego.py,sha256=MXaJqqPVPWE8n9sTfeKePAmuqtS5u2pL1GvRQ2Gf8Y0,1899 +networkx/generators/expanders.py,sha256=FpUynvzKFmn4zxyhCIAuiX2cXPX2tcRA6GzjQi6KfRM,14456 +networkx/generators/geometric.py,sha256=7sna0Q9pfJdYkVhNAXBWMNkaU1sESn39y3CxSSCDtEQ,39589 +networkx/generators/harary_graph.py,sha256=N6vzXKrW-ZU-xDc2ZTF_Gf7kb0LRQVRfK2oLBQvyVO8,6159 +networkx/generators/internet_as_graphs.py,sha256=Y_pQaGhe183X6dXH4ocqIK3DzXRz0oXE-AKwsL1yCHk,14172 +networkx/generators/intersection.py,sha256=1dSnFp58EDbTVBFXHTvmJdeV3lhlO48XgxhkJf2TTF8,4100 +networkx/generators/interval_graph.py,sha256=EdPD9zonEWGTqpdlrlBRZ1OXzwo8ft9g_MdAfLxJ_ME,2203 +networkx/generators/joint_degree_seq.py,sha256=nyp86NC_4XvzvwpwwzKrrCSz1i_4bESSDtVjWvpkWFg,24773 +networkx/generators/lattice.py,sha256=kVCvTahWPQGNbok6maXfaqGzm88UuxhP7D9BkKhGW1o,13500 +networkx/generators/line.py,sha256=vQ0BnlCqeVf3p3CqZ4Et_GKsv__km4HyEYQtoD0Oaa8,17530 +networkx/generators/mycielski.py,sha256=xBX2m77sCzumoH5cAGitksvEEW-ocbCnbdaN7fKUtVk,3314 +networkx/generators/nonisomorphic_trees.py,sha256=gE7uPB-uaE6rEfaimmR9bqobso5yclcCG6u8zwZlS48,6453 +networkx/generators/random_clustered.py,sha256=6B-XK5BqDsfy11dMXb1H0mGhjpo-oePPHImSU-hJYxA,4183 +networkx/generators/random_graphs.py,sha256=6b6XqaqD7YOPEREdKAYFZuXUU-b0lEsrg8IUbqxZI7M,45097 +networkx/generators/small.py,sha256=Xs9JNTtoLiShg7fF7_VRJ-G18JGSt4JEMmhhtpS51r8,28171 +networkx/generators/social.py,sha256=UmMU8WRi0udN5pxvMctmCNZQtsF_k7Mavj4Bt3BQmfM,22963 +networkx/generators/spectral_graph_forge.py,sha256=kt1QgeZmZE2nWSxy_79FJVRGbzMsYSGVvMuCaAtY1tQ,4241 +networkx/generators/stochastic.py,sha256=Qg9vWm9EOug2OQVIHL_dZ5HrXc16lxnWyzX52KWNEPI,1981 +networkx/generators/sudoku.py,sha256=kLM2AP0H4966uYiNO1oAFEmv5qBftU_bOfYucRxexM0,4288 +networkx/generators/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/generators/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_atlas.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_classic.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_cographs.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_community.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_degree_seq.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_directed.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_duplication.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_ego.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_expanders.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_geometric.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_harary_graph.cpython-310.pyc,, 
+networkx/generators/tests/__pycache__/test_internet_as_graphs.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_intersection.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_interval_graph.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_joint_degree_seq.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_lattice.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_line.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_mycielski.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_nonisomorphic_trees.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_random_clustered.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_random_graphs.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_small.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_spectral_graph_forge.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_stochastic.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_sudoku.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_time_series.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_trees.cpython-310.pyc,, +networkx/generators/tests/__pycache__/test_triads.cpython-310.pyc,, +networkx/generators/tests/test_atlas.py,sha256=nwXJL4O5jUqhTwqhkPxHY8s3KXHQTDEdsfbg4MsSzVQ,2530 +networkx/generators/tests/test_classic.py,sha256=o4EfLc7VqFw3NeWrZ5Ooy8FHZXHfZW0qX2p8Hs5xK4o,23413 +networkx/generators/tests/test_cographs.py,sha256=DkiQzP69sjw3QtjWVX2XV0EXoOuEvR42dixPWwuawSE,460 +networkx/generators/tests/test_community.py,sha256=FGcDo3Ajb-yYc5kUkFbVfOJVMG-YppbAtjgBPcVzjLc,11311 +networkx/generators/tests/test_degree_seq.py,sha256=in6lg1pwcAg1N08MA3lQdr3lnm2-aoUy3BRm6Yj_OBQ,7093 +networkx/generators/tests/test_directed.py,sha256=00widU8dJGkdnU_b6-ZxL8KGtx-gSh4sRG7cwbMHvjQ,5258 +networkx/generators/tests/test_duplication.py,sha256=USHcHajtfhh16W-6i2_e7rW6bi81YC6Dc562P-wxiTc,2350 +networkx/generators/tests/test_ego.py,sha256=8v1Qjmkli9wIhhUuqzgqCzysr0C1Z2C3oJMCUoNvgY4,1327 +networkx/generators/tests/test_expanders.py,sha256=_dkrj2NFvZim9ZSZoehmfjJRfC0RsKUFSTDndXQM1sc,5604 +networkx/generators/tests/test_geometric.py,sha256=gnVm4dam_Er88YwaNpNZC6mjJjfgwMYhyLOtU9oPn1o,18087 +networkx/generators/tests/test_harary_graph.py,sha256=U5GfsoekBwVwTGMvk33e2eFOzHEL4czRIWv57j3nt_g,4937 +networkx/generators/tests/test_internet_as_graphs.py,sha256=QmzkOnWg9bcSrv31UcaD6Cko55AV-GPLLY5Aqb_Dmvs,6795 +networkx/generators/tests/test_intersection.py,sha256=hcIit5fKfOn3VjMhz9KqovZK9tzxZfmC6ezvA7gZAvM,819 +networkx/generators/tests/test_interval_graph.py,sha256=-1yXDZDW-ygmNva9Bu-TsS_SYGLcW1KJplwZHFFYyWM,4278 +networkx/generators/tests/test_joint_degree_seq.py,sha256=8TXTZI3Um2gBXtP-4yhGKf9vCi78-NVmWZw9r9WG3F8,4270 +networkx/generators/tests/test_lattice.py,sha256=q4Ri-dH9mKhfq0PNX9xMeYRUiP0JlPBr7piSruZlFlg,9290 +networkx/generators/tests/test_line.py,sha256=vXncJuny2j5ulCJyT01Rt1tTwPib4XelS3dJDdJXjx0,10378 +networkx/generators/tests/test_mycielski.py,sha256=fwZLO1ybcltRy6TzCel8tPBil1oZWv9QSXs779H6Xt0,946 +networkx/generators/tests/test_nonisomorphic_trees.py,sha256=nwATIcuBa2EVlR74koQMeEOA7MDPG8mpQIfDQ8LPxfs,2453 +networkx/generators/tests/test_random_clustered.py,sha256=SalHqWvpnXA3QrDRMjLx15dk2c4Us8Ck52clUERoUI8,1297 +networkx/generators/tests/test_random_graphs.py,sha256=DKEPbvKiFzZQsuofuj_MphGX2KJ8Bvz6ofIttDGMANk,13121 +networkx/generators/tests/test_small.py,sha256=K4-sSBZca3UMP1deUOWlkSzpanJBAT-vQdr11PMI_QY,7060 
+networkx/generators/tests/test_spectral_graph_forge.py,sha256=x4jyTiQiydaUPWYaGsNFsIB47PAzSSwQYCNXGa2B4SU,1594 +networkx/generators/tests/test_stochastic.py,sha256=xdytPcz4ETnuqGtjMr0CI3zR4xWJqi91Zxbkly8Ijf8,2178 +networkx/generators/tests/test_sudoku.py,sha256=dgOmk-B7MxCVkbHdZzsLZppQ61FAArVy4McSVL8Afzo,1968 +networkx/generators/tests/test_time_series.py,sha256=74kHpcBfbed7zmd1Ofh2XoLIhIaEEFpEf51j1e2muMo,2229 +networkx/generators/tests/test_trees.py,sha256=hv8oNYZOcYcaARXvaMQZptCVBvk-huk-nKI5mH9sB-8,7634 +networkx/generators/tests/test_triads.py,sha256=mgpHFf0Z34CqtnXgkdf7gK1dC77ppYAqwviXsaU1HVs,332 +networkx/generators/time_series.py,sha256=-fKclBUnbqzBh-zKKgo96sdLuuj6l8q3svHO7yZ9HHw,2438 +networkx/generators/trees.py,sha256=Wra3uSUolTS2ugQIE42XiFeIHKbiyBmsZfqAXtSkpKU,39283 +networkx/generators/triads.py,sha256=W7DCEbPpC6My82YkXztfmk874he0SwscndAG5QlBSgA,2451 +networkx/lazy_imports.py,sha256=tYxP13tZ3p8-Qh--Mey4ZXZqQhWgQAbI7xYBZRrBzw0,5764 +networkx/linalg/__init__.py,sha256=7iyNZ_YYBnlsW8zSfhUgvEkywOrUWfpIuyS86ZOKlG8,568 +networkx/linalg/__pycache__/__init__.cpython-310.pyc,, +networkx/linalg/__pycache__/algebraicconnectivity.cpython-310.pyc,, +networkx/linalg/__pycache__/attrmatrix.cpython-310.pyc,, +networkx/linalg/__pycache__/bethehessianmatrix.cpython-310.pyc,, +networkx/linalg/__pycache__/graphmatrix.cpython-310.pyc,, +networkx/linalg/__pycache__/laplacianmatrix.cpython-310.pyc,, +networkx/linalg/__pycache__/modularitymatrix.cpython-310.pyc,, +networkx/linalg/__pycache__/spectrum.cpython-310.pyc,, +networkx/linalg/algebraicconnectivity.py,sha256=yQHSsXjJrD_6QqO9IYb2hKnfxE9HGOOLgwlqxDBWnWY,21148 +networkx/linalg/attrmatrix.py,sha256=AWZOBgLbTjpDA_l9YgAUF3Gt6mURWM7DtVLPLhM99S4,15512 +networkx/linalg/bethehessianmatrix.py,sha256=z-XEYIEQRh1tSuorPxrBGyqlT-6sgIMpGhaitU2BpAk,2696 +networkx/linalg/graphmatrix.py,sha256=HzparMcGmcXpIg1T5f7Y-dxPPkUydniTT4RGFrkxzSA,5521 +networkx/linalg/laplacianmatrix.py,sha256=ZPjZ66crPAdVQFuXq4rhwlCKDENf_JJZukAabac-fXs,20537 +networkx/linalg/modularitymatrix.py,sha256=dEbTSC-uQhPxqHcPGkY1SLKwRpz6XIW1Ln5jED_KBKs,4706 +networkx/linalg/spectrum.py,sha256=Cw0zOUMwbilsKO9EObTE6ABnOBQF-gPWIst-jIeHrXs,4214 +networkx/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/linalg/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/linalg/tests/__pycache__/test_algebraic_connectivity.cpython-310.pyc,, +networkx/linalg/tests/__pycache__/test_attrmatrix.cpython-310.pyc,, +networkx/linalg/tests/__pycache__/test_bethehessian.cpython-310.pyc,, +networkx/linalg/tests/__pycache__/test_graphmatrix.cpython-310.pyc,, +networkx/linalg/tests/__pycache__/test_laplacian.cpython-310.pyc,, +networkx/linalg/tests/__pycache__/test_modularity.cpython-310.pyc,, +networkx/linalg/tests/__pycache__/test_spectrum.cpython-310.pyc,, +networkx/linalg/tests/test_algebraic_connectivity.py,sha256=Kj2ct6gQ71xXFP7usAbFLJxD7ZdtTzneHiFJQOoVCUQ,13737 +networkx/linalg/tests/test_attrmatrix.py,sha256=XD3YuPc5yXKWbhwVSI8YiV_wABWM-rLtwf1uwwWlnI0,2833 +networkx/linalg/tests/test_bethehessian.py,sha256=0r-Do902ywV10TyqTlIJ2Ls3iMqM6sSs2PZbod7kWBM,1327 +networkx/linalg/tests/test_graphmatrix.py,sha256=e5YSH9ih1VL64nnYgZFDvLyKbP3BFqpp0jY6t-8b2eY,8708 +networkx/linalg/tests/test_laplacian.py,sha256=0AGJwezqohoQtrmTZ94Gvg5vISMCB7_G2QdJl7JFTXg,14081 +networkx/linalg/tests/test_modularity.py,sha256=mfKUvwc3bj6Rud1aG4oK3Eu1qg12o6cB8-pv5ZFicYY,3115 +networkx/linalg/tests/test_spectrum.py,sha256=agP2DsiEIvtkNUkT94mdPtJjwnobnjMTUOwjIQa4giA,2828 
+networkx/readwrite/__init__.py,sha256=iHycAh1rjr4bCPQMNiHiqm8cP3iu-g1v_uKiGZtkuXY,562 +networkx/readwrite/__pycache__/__init__.cpython-310.pyc,, +networkx/readwrite/__pycache__/adjlist.cpython-310.pyc,, +networkx/readwrite/__pycache__/edgelist.cpython-310.pyc,, +networkx/readwrite/__pycache__/gexf.cpython-310.pyc,, +networkx/readwrite/__pycache__/gml.cpython-310.pyc,, +networkx/readwrite/__pycache__/graph6.cpython-310.pyc,, +networkx/readwrite/__pycache__/graphml.cpython-310.pyc,, +networkx/readwrite/__pycache__/leda.cpython-310.pyc,, +networkx/readwrite/__pycache__/multiline_adjlist.cpython-310.pyc,, +networkx/readwrite/__pycache__/p2g.cpython-310.pyc,, +networkx/readwrite/__pycache__/pajek.cpython-310.pyc,, +networkx/readwrite/__pycache__/sparse6.cpython-310.pyc,, +networkx/readwrite/__pycache__/text.cpython-310.pyc,, +networkx/readwrite/adjlist.py,sha256=UiwcjwVSrN1X5BUWKmxHt4aNJpYbGzLNtmLApHRP89g,8430 +networkx/readwrite/edgelist.py,sha256=3p1w6TV2cWkruVuiFqZv7yEbeuMS-dqraBSbtlN8Iv8,14232 +networkx/readwrite/gexf.py,sha256=R8-4bCbitvx7uz4F9TR2-AGVik-DYuD3Ouyo-iLJKtk,39692 +networkx/readwrite/gml.py,sha256=xn8QIMTfHjMcWW1LQiS_13InIupJlYQcCkLZACJ9gWg,31150 +networkx/readwrite/graph6.py,sha256=wCc_RVfyEvkkg2vOfUXVNFzcolTUKilMp0fuTlYy7I0,11400 +networkx/readwrite/graphml.py,sha256=hwbvL1rRWA3Da0dKyASvXifi9bB8Qu9pxS5c_6a0-iA,39317 +networkx/readwrite/json_graph/__init__.py,sha256=31_5zVLXYEZkjOB-TKXZ5bi83JybPWgpCaRKOXIGoOA,676 +networkx/readwrite/json_graph/__pycache__/__init__.cpython-310.pyc,, +networkx/readwrite/json_graph/__pycache__/adjacency.cpython-310.pyc,, +networkx/readwrite/json_graph/__pycache__/cytoscape.cpython-310.pyc,, +networkx/readwrite/json_graph/__pycache__/node_link.cpython-310.pyc,, +networkx/readwrite/json_graph/__pycache__/tree.cpython-310.pyc,, +networkx/readwrite/json_graph/adjacency.py,sha256=WM6fdncV87WDLPOfF-IbOlOOBMX0utUjJ09UsxtwRAo,4716 +networkx/readwrite/json_graph/cytoscape.py,sha256=kX6_p24F4CnDdT0D5lYrD0-jypyMdmqnGQEXKR1_kH4,5338 +networkx/readwrite/json_graph/node_link.py,sha256=iWlZX_Em_4mQbVXjXkikCtEWDLVxua7Bx0RmwwAzqkg,7473 +networkx/readwrite/json_graph/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/json_graph/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_adjacency.cpython-310.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_cytoscape.cpython-310.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_node_link.cpython-310.pyc,, +networkx/readwrite/json_graph/tests/__pycache__/test_tree.cpython-310.pyc,, +networkx/readwrite/json_graph/tests/test_adjacency.py,sha256=jueQE3Z_W5BZuCjr0hEsOWSfoQ2fP51p0o0m7IcXUuE,2456 +networkx/readwrite/json_graph/tests/test_cytoscape.py,sha256=vFoDzcSRI9THlmp4Fu2HHhIF9AUmECWs5mftVWjaWWs,2044 +networkx/readwrite/json_graph/tests/test_node_link.py,sha256=bDe2Vv1M4h0IDbKjS482p8ZE7SZtBfHDgZ1OEPibwoo,4536 +networkx/readwrite/json_graph/tests/test_tree.py,sha256=zBXv3_db2XGxFs3XQ35btNf_ku52aLXXiHZmmX4ixAs,1352 +networkx/readwrite/json_graph/tree.py,sha256=K4rF4Kds4g0JhgcPTrrR_I3Pswpze8yCVH4M-WF9nn0,3851 +networkx/readwrite/leda.py,sha256=VjpyUYeAWPD4TQSyvcC-ftcTeg6Pow9zJJqNuiGZ0zU,2797 +networkx/readwrite/multiline_adjlist.py,sha256=n6eLkGkp_rfiVTxLJzPSHm5ctiBc2zTshNDsbKprvcA,11291 +networkx/readwrite/p2g.py,sha256=_OVajlPGLynzYQMBp5QReAEMiQ_BXfEEATlV61sUYM4,3091 +networkx/readwrite/pajek.py,sha256=9j3sRjLzPQxqQFdEoTCOwICpdAf7G39cdls04dhErns,8738 
+networkx/readwrite/sparse6.py,sha256=YY7gtCWuS0sxgueSB_lS9HkFRNW8hPvkMxchmfoPngw,10314 +networkx/readwrite/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/readwrite/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_adjlist.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_edgelist.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_gexf.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_gml.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_graph6.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_graphml.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_leda.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_p2g.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_pajek.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_sparse6.cpython-310.pyc,, +networkx/readwrite/tests/__pycache__/test_text.cpython-310.pyc,, +networkx/readwrite/tests/test_adjlist.py,sha256=ZGxGuM9AEV6xskWAJQmBndVJIemHVKBj02PpPnA6a-U,9430 +networkx/readwrite/tests/test_edgelist.py,sha256=dkc14_bCP8JD5cAFYza2mLHfirK-aNI6COl5i3hbHfc,9617 +networkx/readwrite/tests/test_gexf.py,sha256=Tbqueeh0XRQ8vtmGwXcyy9K3tWPlnLu6Gop0Hy4cZcc,19405 +networkx/readwrite/tests/test_gml.py,sha256=8_2nBU6n8zLHkApiuKkZNH-xMRSdA1G8ZH3Lvjspizg,21391 +networkx/readwrite/tests/test_graph6.py,sha256=DAi58D_G3j2UGk6VpfGkLGzfSAl318TIbuXSKKZ102U,6067 +networkx/readwrite/tests/test_graphml.py,sha256=MrU3AkdqNQ6gVLtOQrZUx39pV7PjS_ETu5uuT5Ce6BI,67573 +networkx/readwrite/tests/test_leda.py,sha256=_5F4nLLQ1oAZQMZtTQoFncZL0Oc-IsztFBglEdQeH3k,1392 +networkx/readwrite/tests/test_p2g.py,sha256=drsdod5amV9TGCk-qE2RwsvAop78IKEI1WguVFfd9rs,1320 +networkx/readwrite/tests/test_pajek.py,sha256=nc8f70J-fmMCOpLY-fdtmbjyMb2abWgzRFxZNnM7Ajs,4628 +networkx/readwrite/tests/test_sparse6.py,sha256=cqFHWz4G_kMawaRqceofN4K-JlkmPx3BEaDXkU8DD0o,5284 +networkx/readwrite/tests/test_text.py,sha256=w17FdFQ4vK3J8d2UKPZUEtIo5udp6UyilPXyIr8JfpE,56562 +networkx/readwrite/text.py,sha256=NdS9C0UU2DS8t49SbMnnkCtsOZF-ZPoSvuY4FpdZ82s,32126 +networkx/relabel.py,sha256=0HptAQOBToKhLZzxscd6FQpzVCNMlYmiHjHul69ct8o,10300 +networkx/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/tests/__pycache__/test_all_random_functions.cpython-310.pyc,, +networkx/tests/__pycache__/test_convert.cpython-310.pyc,, +networkx/tests/__pycache__/test_convert_numpy.cpython-310.pyc,, +networkx/tests/__pycache__/test_convert_pandas.cpython-310.pyc,, +networkx/tests/__pycache__/test_convert_scipy.cpython-310.pyc,, +networkx/tests/__pycache__/test_exceptions.cpython-310.pyc,, +networkx/tests/__pycache__/test_import.cpython-310.pyc,, +networkx/tests/__pycache__/test_lazy_imports.cpython-310.pyc,, +networkx/tests/__pycache__/test_relabel.cpython-310.pyc,, +networkx/tests/test_all_random_functions.py,sha256=DljfvNH8UTDiAORcrKrSbWwNPqouU8Ba0vjX5BqSG90,8713 +networkx/tests/test_convert.py,sha256=SoIVrqJFF9Gu9Jff_apfbpqg8QhkfC6QW4qzoSM-ukM,12731 +networkx/tests/test_convert_numpy.py,sha256=R4y5ud0hVZFSGrFjUHD6Anu_aaasy2O_Eke4FaOhPqU,14951 +networkx/tests/test_convert_pandas.py,sha256=cZJEdV0jP8afRZMqJ8-aL9Ma5NdXSWMuj1hVbjGMR2g,12257 +networkx/tests/test_convert_scipy.py,sha256=C2cY_8dgBksO0uttkhyCnjACXtC6KHjxqHUk47P5wH8,10436 +networkx/tests/test_exceptions.py,sha256=XYkpPzqMepSw3MPRUJN5LcFsUsy3YT_fiRDhm0OeAeQ,927 
+networkx/tests/test_import.py,sha256=Gm4ujfH9JkQtDrSjOlwXXXUuubI057wskKLCkF6Z92k,220 +networkx/tests/test_lazy_imports.py,sha256=nKykNQPt_ZV8JxCH_EkwwcPNayAgZGQVf89e8I7uIlI,2680 +networkx/tests/test_relabel.py,sha256=dffbjiW_VUAQe7iD8knFS_KepUITt0F6xuwf7daWwKw,14517 +networkx/utils/__init__.py,sha256=F0y3R6cWX8hjdLK9eeP-EQCMCpufjGJnclN1zsn7jas,302 +networkx/utils/__pycache__/__init__.cpython-310.pyc,, +networkx/utils/__pycache__/backends.cpython-310.pyc,, +networkx/utils/__pycache__/configs.cpython-310.pyc,, +networkx/utils/__pycache__/decorators.cpython-310.pyc,, +networkx/utils/__pycache__/heaps.cpython-310.pyc,, +networkx/utils/__pycache__/mapped_queue.cpython-310.pyc,, +networkx/utils/__pycache__/misc.cpython-310.pyc,, +networkx/utils/__pycache__/random_sequence.cpython-310.pyc,, +networkx/utils/__pycache__/rcm.cpython-310.pyc,, +networkx/utils/__pycache__/union_find.cpython-310.pyc,, +networkx/utils/backends.py,sha256=a7iJuTc2rk9fRraeWXfBWn4xd2pRUdj6vhhOys4BFaw,68527 +networkx/utils/configs.py,sha256=gNiGYGH2OrhM3O1jmOhjjMG6x8qdw0aWY1tb3_k-WDQ,9107 +networkx/utils/decorators.py,sha256=eMWcHFooCJ-OWtlfDHEevRFNY8DzihP4XA6mdqhsmoI,46829 +networkx/utils/heaps.py,sha256=HUZuETHfELEqiXdMBPmD9fA2KiACVhp6iEahcrjFxYM,10391 +networkx/utils/mapped_queue.py,sha256=8hNMQtvXr7-fOzg-22xt3pWKrElkNGSSXspWgTcgdeQ,10185 +networkx/utils/misc.py,sha256=gyHBiNYDCJjYX1q59qC-DuWCopnN34T3wEd_enH98sk,19321 +networkx/utils/random_sequence.py,sha256=KzKh0BRMri0MBZlzxHNMl3qRTy2DnBexW3eDzmxKab4,4237 +networkx/utils/rcm.py,sha256=MeOhFkv91ALieKJtGHqkhxgO7KJBz53mB8tRcYCX3xk,4623 +networkx/utils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +networkx/utils/tests/__pycache__/__init__.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test__init.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test_backends.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test_config.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test_decorators.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test_heaps.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test_mapped_queue.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test_misc.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test_random_sequence.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test_rcm.cpython-310.pyc,, +networkx/utils/tests/__pycache__/test_unionfind.cpython-310.pyc,, +networkx/utils/tests/test__init.py,sha256=QE0i-lNE4pG2eYjB2mZ0uw7jPD-7TdL7Y9p73JoWQmo,363 +networkx/utils/tests/test_backends.py,sha256=fs1176RB_1lecBFSE9hrsF2F6vlz40foIBUJvNAYf5M,2910 +networkx/utils/tests/test_config.py,sha256=Q3xZjdBQF4eM2nHg4lp3JXC873vch7U77pl0CCDFphA,5930 +networkx/utils/tests/test_decorators.py,sha256=dm3b5yiQPlnlT_4pSm0FwK-xBGV9dcnhv14Vh9Jiz1o,14050 +networkx/utils/tests/test_heaps.py,sha256=qCuWMzpcMH1Gwu014CAams78o151QD5YL0mB1fz16Yw,3711 +networkx/utils/tests/test_mapped_queue.py,sha256=l1Nguzz68Fv91FnAT7y7B0GXSoje9uoWiObHo7TliGM,7354 +networkx/utils/tests/test_misc.py,sha256=zkD1pYO4xBuBxlGe-nU8okcX6hfDMgu0OJZGu4TMrN0,8671 +networkx/utils/tests/test_random_sequence.py,sha256=Ou-IeCFybibZuycoin5gUQzzC-iy5yanZFmrqvdGt6Q,925 +networkx/utils/tests/test_rcm.py,sha256=UvUAkgmQMGk_Nn94TJyQsle4A5SLQFqMQWld1tiQ2lk,1421 +networkx/utils/tests/test_unionfind.py,sha256=j-DF5XyeJzq1hoeAgN5Nye2Au7EPD040t8oS4Aw2IwU,1579 +networkx/utils/union_find.py,sha256=NxKlBlyS71A1Wlnt28L-wyZoI9ExZvJth_0e2XSVris,3338 diff --git a/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/WHEEL 
b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/entry_points.txt b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..2170e9f4285422f4f95b05fa682a9a479c19bf24 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/entry_points.txt @@ -0,0 +1,2 @@ +[networkx.backends] +nx-loopback = networkx.classes.tests.dispatch_interface:dispatcher diff --git a/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..4d07dfe2f85d6849d7f416dcce756b2501ba847e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/networkx-3.3.dist-info/top_level.txt @@ -0,0 +1 @@ +networkx diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/arrays/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/arrays/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a11755275d00e070bea6ab73a881b98d0b976551 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/arrays/__init__.py @@ -0,0 +1,53 @@ +""" +All of pandas' ExtensionArrays. + +See :ref:`extending.extension-types` for more. +""" +from pandas.core.arrays import ( + ArrowExtensionArray, + ArrowStringArray, + BooleanArray, + Categorical, + DatetimeArray, + FloatingArray, + IntegerArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + SparseArray, + StringArray, + TimedeltaArray, +) + +__all__ = [ + "ArrowExtensionArray", + "ArrowStringArray", + "BooleanArray", + "Categorical", + "DatetimeArray", + "FloatingArray", + "IntegerArray", + "IntervalArray", + "NumpyExtensionArray", + "PeriodArray", + "SparseArray", + "StringArray", + "TimedeltaArray", +] + + +def __getattr__(name: str) -> type[NumpyExtensionArray]: + if name == "PandasArray": + # GH#53694 + import warnings + + from pandas.util._exceptions import find_stack_level + + warnings.warn( + "PandasArray has been renamed NumpyExtensionArray. Use that " + "instead. 
This alias will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return NumpyExtensionArray + raise AttributeError(f"module 'pandas.arrays' has no attribute '{name}'") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..537655a3e5b97cd535dabca09389fb3fcaea088b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/arrays/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6fe04bb324583e474e5c732be232c5b57df48650 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/apply/__pycache__/test_series_apply.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2d1f0ab7f9324f82f8e3147525edf58fe97ec24 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eba537e7e11df5657512bd3b444b081b1e4eda1 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97df8fdbec458d065c9cbcce2913fc842fa2603c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/tensorflow.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7268b871295ab4cd9c815d3f0dd224e5b007776 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/safetensors/__pycache__/torch.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors/mlx.py b/llmeval-env/lib/python3.10/site-packages/safetensors/mlx.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9fe37519c817e4d9db87e8ce53c2dc8b85254f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors/mlx.py @@ -0,0 +1,138 @@ +import os +from typing import Dict, Optional, Union + +import numpy as np + +import mlx.core as mx +from safetensors import numpy, safe_open + + +def save(tensors: Dict[str, mx.array], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. 
+ + Args: + tensors (`Dict[str, mx.array]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.mlx import save + import mlx.core as mx + + tensors = {"embedding": mx.zeros((512, 1024)), "attention": mx.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + np_tensors = _mx2np(tensors) + return numpy.save(np_tensors, metadata=metadata) + + +def save_file( + tensors: Dict[str, mx.array], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, mx.array]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `None` + + Example: + + ```python + from safetensors.mlx import save_file + import mlx.core as mx + + tensors = {"embedding": mx.zeros((512, 1024)), "attention": mx.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + np_tensors = _mx2np(tensors) + return numpy.save_file(np_tensors, filename, metadata=metadata) + + +def load(data: bytes) -> Dict[str, mx.array]: + """ + Loads a safetensors file into MLX format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, mx.array]`: dictionary that contains name as key, value as `mx.array` + + Example: + + ```python + from safetensors.mlx import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = numpy.load(data) + return _np2mx(flat) + + +def load_file(filename: Union[str, os.PathLike]) -> Dict[str, mx.array]: + """ + Loads a safetensors file into MLX format. 
+ + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + + Returns: + `Dict[str, mx.array]`: dictionary that contains name as key, value as `mx.array` + + Example: + + ```python + from safetensors.flax import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="mlx") as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +def _np2mx(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, mx.array]: + for k, v in numpy_dict.items(): + numpy_dict[k] = mx.array(v) + return numpy_dict + + +def _mx2np(mx_dict: Dict[str, mx.array]) -> Dict[str, np.array]: + new_dict = {} + for k, v in mx_dict.items(): + new_dict[k] = np.asarray(v) + return new_dict diff --git a/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..7fa40cf5fe093c0c9246746e07a4f04fbc5b565d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Tsuyoshi Hombashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..c5d2c093a8f61000277e48d8ad7b1c95c3a2beca --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/METADATA @@ -0,0 +1,171 @@ +Metadata-Version: 2.1 +Name: tcolorpy +Version: 0.1.6 +Summary: tcolopy is a Python library to apply true color for terminal text. 
+Home-page: https://github.com/thombashi/tcolorpy +Author: Tsuyoshi Hombashi +Author-email: tsuyoshi.hombashi@gmail.com +License: MIT License +Project-URL: Changlog, https://github.com/thombashi/tcolorpy/blob/master/CHANGELOG.md +Project-URL: Source, https://github.com/thombashi/tcolorpy +Project-URL: Tracker, https://github.com/thombashi/tcolorpy/issues +Keywords: ANSI escape,terminal color,truecolor +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Terminals +Classifier: Topic :: Text Processing +Classifier: Typing :: Typed +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Provides-Extra: test +Requires-Dist: pytest >=6.0.1 ; extra == 'test' +Requires-Dist: pytest-md-report >=0.5 ; extra == 'test' + +.. contents:: **tcolorpy** + :backlinks: top + :depth: 2 + + +Summary +============================================ +tcolopy is a Python library to apply true color for terminal text. + +|PyPI pkg ver| |conda pkg ver| |Supported Python implementations| |Supported Python versions| |CI status| |CodeQL| |coverage| + +.. |PyPI pkg ver| image:: https://badge.fury.io/py/tcolorpy.svg + :target: https://badge.fury.io/py/tcolorpy + :alt: PyPI package version + +.. |conda pkg ver| image:: https://anaconda.org/conda-forge/tcolorpy/badges/version.svg + :target: https://anaconda.org/conda-forge/tcolorpy + :alt: conda-forge package version + +.. |Supported Python implementations| image:: https://img.shields.io/pypi/implementation/tcolorpy.svg + :target: https://pypi.org/project/tcolorpy + :alt: Supported Python implementations + +.. |Supported Python versions| image:: https://img.shields.io/pypi/pyversions/tcolorpy.svg + :target: https://pypi.org/project/tcolorpy + :alt: Supported Python versions + +.. |CI status| image:: https://github.com/thombashi/tcolorpy/actions/workflows/ci.yml/badge.svg + :target: https://github.com/thombashi/tcolorpy/actions/workflows/ci.yml + :alt: CI status of Linux/macOS/Windows + +.. |CodeQL| image:: https://github.com/thombashi/tcolorpy/actions/workflows/github-code-scanning/codeql/badge.svg + :target: https://github.com/thombashi/tcolorpy/actions/workflows/github-code-scanning/codeql + :alt: CodeQL + +.. 
|coverage| image:: https://coveralls.io/repos/github/thombashi/tcolorpy/badge.svg?branch=master + :target: https://coveralls.io/github/thombashi/tcolorpy?branch=master + :alt: Test coverage: coveralls + + +Installation +============================================ + +Installation: pip +------------------------------ +:: + + pip install tcolorpy + +Installation: conda +------------------------------ +:: + + conda install -c conda-forge tcolorpy + + +Usage +============================================ + +Library usage +-------------------------------------------- + +:Sample Code: + .. code-block:: python + + from tcolorpy import tcolor + + print(tcolor("tcolopy example", color="#ee1177", styles=["bold", "italic", "underline"])) + +:Output: + .. figure:: https://cdn.jsdelivr.net/gh/thombashi/tcolorpy@master/ss/oneline.png + :scale: 60% + :alt: https://github.com/thombashi/tcolorpy/blob/master/ss/oneline.png + +You can set the following ``tcolor`` arguments: + +- ``color``/``bg_color`` + - color names (``"red"``, ``"green"``, etc.) or color code (``"#RRGGBB"``) +- ``styles`` + - ``"bold"``, ``"italic"``, etc. + + +Other examples +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Apply true color and styles to text: + +.. figure:: https://cdn.jsdelivr.net/gh/thombashi/tcolorpy@master/ss/styles.png + :scale: 60% + :alt: https://github.com/thombashi/tcolorpy/blob/master/ss/styles.png + + `example source code `__ + +You can also specify colors by name: + +.. figure:: https://cdn.jsdelivr.net/gh/thombashi/tcolorpy@master/ss/ansi_colors.png + :scale: 60% + :alt: https://github.com/thombashi/tcolorpy/blob/master/ss/ansi_colors.png + + `example source code `__ + + +CLI usage +-------------------------------------------- +``tcolorpy`` can be used via CLI: + +:: + + $ python3 -m tcolorpy "tcolopy example" -c "#ee1177" -s bold,italic,underline + +Command help +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +:: + + usage: __main__.py [-h] [-c COLOR] [-b BG_COLOR] [-s STYLES] [--encode ENCODE] string + + positional arguments: + string string to apply styles. + + options: + -h, --help show this help message and exit + -c COLOR, --color COLOR + specify a color code (#XXXXXX) or a name. valid names are: black, red, green, yellow, blue, magenta, cyan, white, lightblack, lightred, lightgreen, lightyellow, lightblue, lightmagenta, lightcyan, lightwhite + -b BG_COLOR, --bg-color BG_COLOR + specify a background color code (#XXXXXX) or a name. valid names are: black, red, green, yellow, blue, magenta, cyan, white, lightblack, lightred, lightgreen, lightyellow, lightblue, lightmagenta, lightcyan, lightwhite + -s STYLES, --styles STYLES + specify a comma-separated style. valid values are: bold, dim, italic, underline, blink, invert, strike + --encode ENCODE output a text encoded with the specified encoding + + +Dependencies +============================================ +Python 3.7+ +no external dependencies. 
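As a supplementary sketch (editor's addition, not part of the recorded METADATA file), the documented ``tcolor`` arguments can be combined as follows; the color names and style values are taken from the option lists above:

.. code-block:: python

    from tcolorpy import tcolor

    # color/bg_color accept a name ("red", "lightcyan", ...) or a "#RRGGBB" code;
    # styles is a list drawn from: bold, dim, italic, underline, blink, invert, strike
    print(tcolor("sample text", color="lightcyan", bg_color="#333333", styles=["bold", "underline"]))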
diff --git a/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ccbfc1eabb32736954461cc4fa8597c78abb506c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/RECORD @@ -0,0 +1,17 @@ +tcolorpy-0.1.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tcolorpy-0.1.6.dist-info/LICENSE,sha256=9BoEVtXyu6Jf1NflC1GpXeMEdw_x21p5UV0DOXqRTY0,1074 +tcolorpy-0.1.6.dist-info/METADATA,sha256=IDGYAt_oFtLBO4jHLKx8SETH0FP33K-RaszTkTLhMes,6358 +tcolorpy-0.1.6.dist-info/RECORD,, +tcolorpy-0.1.6.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92 +tcolorpy-0.1.6.dist-info/top_level.txt,sha256=g8LDaQz0FVP61jibPz7OTwQqiseVV9pxUYDeGp2lFAI,9 +tcolorpy/__init__.py,sha256=6fI5Y7N04ZgSFfienFNtd7hjJtAmBO8j4zxcDpk4OYk,913 +tcolorpy/__main__.py,sha256=gjNpi78hE-X6CpY20ZLMmQ_yaWYIh_eOu2XrLnoGkBE,1701 +tcolorpy/__pycache__/__init__.cpython-310.pyc,, +tcolorpy/__pycache__/__main__.cpython-310.pyc,, +tcolorpy/__pycache__/__version__.cpython-310.pyc,, +tcolorpy/__pycache__/_const.cpython-310.pyc,, +tcolorpy/__pycache__/_truecolor.cpython-310.pyc,, +tcolorpy/__version__.py,sha256=FfUl1ix-FI5DHv8TmnpAYpPWggJASYcLGQ0s-sVO6Ko,201 +tcolorpy/_const.py,sha256=XS2rzsxY7SKxg0HreYTR_kEGeSi_59gOrrntI2_kG1o,1080 +tcolorpy/_truecolor.py,sha256=nzu2GCc6Tu_4no5_Qcksm88-Vm75sCdeOMDQHG_2DhM,7495 +tcolorpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.43.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..eda2ffb721c9f828b59d81ca04550452a1cde8cc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tcolorpy-0.1.6.dist-info/top_level.txt @@ -0,0 +1 @@ +tcolorpy diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ba1b6403f09ffb72b9ebb6ee18afbf52ae73e83b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_VF.pyi b/llmeval-env/lib/python3.10/site-packages/torch/_VF.pyi new file mode 100644 index 0000000000000000000000000000000000000000..8f6e61bf678ae19145799f44bb6beceac9bf24c9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_VF.pyi @@ -0,0 +1,25648 @@ +# @generated from torch/_C/_VariableFunctions.pyi.in +# mypy: disable-error-code="type-arg" + +import builtins +from typing import ( + Any, + Callable, + ContextManager, + Iterator, + List, + Literal, + NamedTuple, + Optional, + overload, + Sequence, + Tuple, + TypeVar, + Union, +) + +import torch +from torch import contiguous_format, Generator, inf, 
memory_format, strided, SymInt, Tensor +from torch.types import ( + _bool, + _complex, + _device, + _dtype, + _float, + _int, + _layout, + _qscheme, + _size, + Device, + Number, +) + +from torch._prims_common import DeviceLikeType + +@overload +def __and__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __and__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __lshift__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __lshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __or__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __or__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __rshift__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __rshift__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +@overload +def __xor__(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def __xor__(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +def _adaptive_avg_pool2d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ... +def _adaptive_avg_pool3d(input: Tensor, output_size: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]]) -> Tensor: ... +def _add_batch_dim(input: Tensor, batch_dim: _int, level: _int) -> Tensor: ... +@overload +def _add_relu(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def _add_relu(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def _add_relu_(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def _add_relu_(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... +def _addmm_activation(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, use_gelu: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def _aminmax(input: Tensor) -> Tuple[Tensor, Tensor]: ... +@overload +def _aminmax(input: Tensor, dim: _int, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: ... +def _amp_foreach_non_finite_check_and_unscale_(self: Union[Tuple[Tensor, ...], List[Tensor]], found_inf: Tensor, inv_scale: Tensor) -> None: ... +def _amp_update_scale_(input: Tensor, growth_tracker: Tensor, found_inf: Tensor, scale_growth_factor: _float, scale_backoff_factor: _float, growth_interval: _int) -> Tensor: ... +@overload +def _assert_async(input: Tensor) -> None: + r""" + _assert_async(tensor) -> void + + Asynchronously assert that the contents of tensor are nonzero. For CPU tensors, + this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for + CUDA tensors, we DO NOT synchronize and you may only find out the assertion + failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for + testing invariants in CUDA tensors without giving up performance. This function + is NOT intended to be used for regular error checking, as it will trash your CUDA + context if the assert fails (forcing you to restart your PyTorch process.) + + Args: + tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero + elements (including False for boolean tensors) cause an assertion failure + to be raised. + """ + ... 
+@overload +def _assert_async(input: Tensor, assert_msg: str) -> None: + r""" + _assert_async(tensor) -> void + + Asynchronously assert that the contents of tensor are nonzero. For CPU tensors, + this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for + CUDA tensors, we DO NOT synchronize and you may only find out the assertion + failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for + testing invariants in CUDA tensors without giving up performance. This function + is NOT intended to be used for regular error checking, as it will trash your CUDA + context if the assert fails (forcing you to restart your PyTorch process.) + + Args: + tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero + elements (including False for boolean tensors) cause an assertion failure + to be raised. + """ + ... +def _assert_scalar(self: Union[Number, _complex], assert_msg: str) -> None: ... +def _assert_tensor_metadata(a: Tensor, size: Optional[Sequence[Union[_int, SymInt]]] = None, stride: Optional[Sequence[Union[_int, SymInt]]] = None, dtype: Optional[_dtype] = None) -> None: ... +def _batch_norm_impl_index(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, _int]: ... +def _cast_Byte(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Char(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Double(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Float(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Half(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Int(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Long(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _cast_Short(input: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _choose_qparams_per_tensor(input: Tensor, reduce_range: _bool = False) -> Tuple[_float, _int]: ... +def _chunk_cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int, num_chunks: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _coalesce(input: Tensor) -> Tensor: ... +def _compute_linear_combination(input: Tensor, coefficients: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _conj(input: Tensor) -> Tensor: ... +def _conj_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _conj_physical(input: Tensor) -> Tensor: ... +def _convert_indices_from_coo_to_csr(input: Tensor, size: _int, *, out_int32: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +def _convert_indices_from_csr_to_coo(crow_indices: Tensor, col_indices: Tensor, *, out_int32: _bool = False, transpose: _bool = False, out: Optional[Tensor] = None) -> Tensor: ... +def _convert_weight_to_int4pack(input: Tensor, innerKTiles: _int) -> Tensor: ... +@overload +def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: _size, groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool) -> Tensor: ... 
+@overload +def _convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, cudnn_enabled: _bool, allow_tf32: _bool) -> Tensor: ... +def _convolution_mode(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: str, dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def _copy_from(input: Tensor, dst: Tensor, non_blocking: _bool = False) -> Tensor: ... +def _copy_from_and_resize(input: Tensor, dst: Tensor) -> Tensor: ... +def _cslt_compress(input: Tensor) -> Tensor: ... +def _cslt_sparse_mm(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False, alg_id: _int = 0) -> Tensor: ... +def _cslt_sparse_mm_search(compressed_A: Tensor, dense_B: Tensor, bias: Optional[Tensor] = None, alpha: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, transpose_result: _bool = False) -> _int: ... +@overload +def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def _ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, zero_infinity: _bool = False) -> Tuple[Tensor, Tensor]: ... +@overload +def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def _cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int, deterministic: _bool, zero_infinity: _bool) -> Tuple[Tensor, Tensor]: ... +def _cudnn_init_dropout_state(dropout: _float, train: _bool, dropout_seed: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _cudnn_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, weight_buf: Optional[Tensor], hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: Sequence[Union[_int, SymInt]], dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ... +def _cudnn_rnn_flatten_weight(weight_arr: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, input_size: Union[_int, SymInt], mode: _int, hidden_size: Union[_int, SymInt], proj_size: Union[_int, SymInt], num_layers: _int, batch_first: _bool, bidirectional: _bool) -> Tensor: ... +def _cufft_clear_plan_cache(device_index: _int) -> None: ... +def _cufft_get_plan_cache_max_size(device_index: _int) -> _int: ... +def _cufft_get_plan_cache_size(device_index: _int) -> _int: ... +def _cufft_set_plan_cache_max_size(device_index: _int, max_size: _int) -> None: ... +def _cummax_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ... +def _cummin_helper(input: Tensor, values: Tensor, indices: Tensor, dim: _int) -> None: ... 
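# Editor's note (illustrative sketch, not part of the recorded _VF.pyi stub file):
# usage of the _assert_async stub documented earlier in this file. It is a private,
# underscore-prefixed torch API; on CPU tensors the check behaves like
# `assert t.is_nonzero()`, while on CUDA tensors a failure may only surface at a
# later kernel launch, as the docstring above explains.
import torch

t = torch.tensor(1)       # one-element, nonzero tensor -> assertion passes
torch._assert_async(t)    # a zero/False element would raise (possibly asynchronously on CUDA)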
+def _debug_has_internal_overlap(input: Tensor) -> _int: ... +def _dim_arange(like: Tensor, dim: _int) -> Tensor: ... +def _dirichlet_grad(x: Tensor, alpha: Tensor, total: Tensor) -> Tensor: ... +def _disable_functionalization(): ... +@overload +def _efficientzerotensor(size: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _efficientzerotensor(*size: _int, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def _embedding_bag_forward_only(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False, padding_idx: _int = -1) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +@overload +def _empty_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _empty_affine_quantized(*size: _int, scale: _float = 1, zero_point: _int = 0, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _empty_per_channel_affine_quantized(size: Sequence[Union[_int, SymInt]], *, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def _empty_per_channel_affine_quantized(*size: _int, scales: Tensor, zero_points: Tensor, axis: _int, memory_format: Optional[memory_format] = contiguous_format, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _enable_functionalization(*, reapply_views: _bool = False): ... +def _euclidean_dist(x1: Tensor, x2: Tensor) -> Tensor: ... +def _fake_quantize_learnable_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ... +def _fake_quantize_learnable_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int, grad_factor: _float = 1.0) -> Tensor: ... 
+def _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(input: Tensor, scale: Tensor, zero_point: Tensor, fake_quant_enabled: Tensor, quant_min: _int, quant_max: _int) -> torch.return_types._fake_quantize_per_tensor_affine_cachemask_tensor_qparams: ... +def _fft_c2c(input: Tensor, dim: Sequence[Union[_int, SymInt]], normalization: _int, forward: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _fft_c2r(input: Tensor, dim: _size, normalization: _int, last_dim_size: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: ... +def _fft_r2c(input: Tensor, dim: _size, normalization: _int, onesided: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _fill_mem_eff_dropout_mask_(input: Tensor, dropout_p: _float, seed: _int, offset: _int) -> Tensor: ... +def _foobar(input: Tensor, arg1: _bool = True, arg2: _bool = True, *, arg3: _bool = True) -> Tensor: ... +def _foreach_abs(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_abs(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.abs` to each Tensor of the input list. + """ + ... +def _foreach_abs_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_abs_(self: List[Tensor]) -> None + + Apply :func:`torch.abs` to each Tensor of the input list. + """ + ... +def _foreach_acos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_acos(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.acos` to each Tensor of the input list. + """ + ... +def _foreach_acos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_acos_(self: List[Tensor]) -> None + + Apply :func:`torch.acos` to each Tensor of the input list. + """ + ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_add(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor, *, alpha: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_add_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ... 
+@overload +def _foreach_addcdiv(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ... +@overload +def _foreach_addcdiv_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcmul(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Tensor) -> None: ... +@overload +def _foreach_addcmul_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensor1: Union[Tuple[Tensor, ...], List[Tensor]], tensor2: Union[Tuple[Tensor, ...], List[Tensor]], value: Union[Number, _complex] = 1) -> None: ... +def _foreach_asin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_asin(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.asin` to each Tensor of the input list. + """ + ... +def _foreach_asin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_asin_(self: List[Tensor]) -> None + + Apply :func:`torch.asin` to each Tensor of the input list. + """ + ... +def _foreach_atan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_atan(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.atan` to each Tensor of the input list. + """ + ... +def _foreach_atan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_atan_(self: List[Tensor]) -> None + + Apply :func:`torch.atan` to each Tensor of the input list. + """ + ... +def _foreach_ceil(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_ceil(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.ceil` to each Tensor of the input list. + """ + ... 
+def _foreach_ceil_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_ceil_(self: List[Tensor]) -> None + + Apply :func:`torch.ceil` to each Tensor of the input list. + """ + ... +@overload +def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_max(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_clamp_max_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_min(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_clamp_min_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_copy_(self: Union[Tuple[Tensor, ...], List[Tensor]], src: Union[Tuple[Tensor, ...], List[Tensor]], non_blocking: _bool = False) -> None: ... +def _foreach_cos(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_cos(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.cos` to each Tensor of the input list. + """ + ... +def _foreach_cos_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_cos_(self: List[Tensor]) -> None + + Apply :func:`torch.cos` to each Tensor of the input list. + """ + ... +def _foreach_cosh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_cosh(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.cosh` to each Tensor of the input list. + """ + ... +def _foreach_cosh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_cosh_(self: List[Tensor]) -> None + + Apply :func:`torch.cosh` to each Tensor of the input list. + """ + ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_div(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... 
+@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_div_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_erf(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_erf(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.erf` to each Tensor of the input list. + """ + ... +def _foreach_erf_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_erf_(self: List[Tensor]) -> None + + Apply :func:`torch.erf` to each Tensor of the input list. + """ + ... +def _foreach_erfc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_erfc(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.erfc` to each Tensor of the input list. + """ + ... +def _foreach_erfc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_erfc_(self: List[Tensor]) -> None + + Apply :func:`torch.erfc` to each Tensor of the input list. + """ + ... +def _foreach_exp(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_exp(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.exp` to each Tensor of the input list. + """ + ... +def _foreach_exp_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_exp_(self: List[Tensor]) -> None + + Apply :func:`torch.exp` to each Tensor of the input list. + """ + ... +def _foreach_expm1(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_expm1(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.expm1` to each Tensor of the input list. + """ + ... +def _foreach_expm1_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_expm1_(self: List[Tensor]) -> None + + Apply :func:`torch.expm1` to each Tensor of the input list. + """ + ... +def _foreach_floor(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_floor(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.floor` to each Tensor of the input list. + """ + ... +def _foreach_floor_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_floor_(self: List[Tensor]) -> None + + Apply :func:`torch.floor` to each Tensor of the input list. + """ + ... +def _foreach_frac(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_frac(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.frac` to each Tensor of the input list. + """ + ... +def _foreach_frac_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_frac_(self: List[Tensor]) -> None + + Apply :func:`torch.frac` to each Tensor of the input list. + """ + ... +@overload +def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_lerp(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... 
+@overload +def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weight: Union[Number, _complex]) -> None: ... +@overload +def _foreach_lerp_(self: Union[Tuple[Tensor, ...], List[Tensor]], tensors1: Union[Tuple[Tensor, ...], List[Tensor]], weights: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_lgamma(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_lgamma(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.lgamma` to each Tensor of the input list. + """ + ... +def _foreach_lgamma_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_lgamma_(self: List[Tensor]) -> None + + Apply :func:`torch.lgamma` to each Tensor of the input list. + """ + ... +def _foreach_log(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_log(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.log` to each Tensor of the input list. + """ + ... +def _foreach_log10(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_log10(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.log10` to each Tensor of the input list. + """ + ... +def _foreach_log10_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_log10_(self: List[Tensor]) -> None + + Apply :func:`torch.log10` to each Tensor of the input list. + """ + ... +def _foreach_log1p(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_log1p(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.log1p` to each Tensor of the input list. + """ + ... +def _foreach_log1p_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_log1p_(self: List[Tensor]) -> None + + Apply :func:`torch.log1p` to each Tensor of the input list. + """ + ... +def _foreach_log2(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_log2(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.log2` to each Tensor of the input list. + """ + ... +def _foreach_log2_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_log2_(self: List[Tensor]) -> None + + Apply :func:`torch.log2` to each Tensor of the input list. + """ + ... +def _foreach_log_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_log_(self: List[Tensor]) -> None + + Apply :func:`torch.log` to each Tensor of the input list. + """ + ... +@overload +def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_maximum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_maximum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... 
+@overload +def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_minimum(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_minimum_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_mul(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Tensor) -> None: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... +@overload +def _foreach_mul_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_neg(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_neg(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.neg` to each Tensor of the input list. + """ + ... +def _foreach_neg_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_neg_(self: List[Tensor]) -> None + + Apply :func:`torch.neg` to each Tensor of the input list. + """ + ... +def _foreach_norm(self: Union[Tuple[Tensor, ...], List[Tensor]], ord: Union[Number, _complex] = 2) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_pow(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_pow(self: Union[Number, _complex], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Number, _complex]) -> None: ... +@overload +def _foreach_pow_(self: Union[Tuple[Tensor, ...], List[Tensor]], exponent: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_reciprocal(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_reciprocal(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.reciprocal` to each Tensor of the input list. + """ + ... 
+def _foreach_reciprocal_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_reciprocal_(self: List[Tensor]) -> None + + Apply :func:`torch.reciprocal` to each Tensor of the input list. + """ + ... +def _foreach_round(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_round(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.round` to each Tensor of the input list. + """ + ... +def _foreach_round_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_round_(self: List[Tensor]) -> None + + Apply :func:`torch.round` to each Tensor of the input list. + """ + ... +def _foreach_sigmoid(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_sigmoid(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.sigmoid` to each Tensor of the input list. + """ + ... +def _foreach_sigmoid_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_sigmoid_(self: List[Tensor]) -> None + + Apply :func:`torch.sigmoid` to each Tensor of the input list. + """ + ... +def _foreach_sign(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +def _foreach_sign_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: ... +def _foreach_sin(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_sin(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.sin` to each Tensor of the input list. + """ + ... +def _foreach_sin_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_sin_(self: List[Tensor]) -> None + + Apply :func:`torch.sin` to each Tensor of the input list. + """ + ... +def _foreach_sinh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_sinh(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.sinh` to each Tensor of the input list. + """ + ... +def _foreach_sinh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_sinh_(self: List[Tensor]) -> None + + Apply :func:`torch.sinh` to each Tensor of the input list. + """ + ... +def _foreach_sqrt(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_sqrt(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.sqrt` to each Tensor of the input list. + """ + ... +def _foreach_sqrt_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_sqrt_(self: List[Tensor]) -> None + + Apply :func:`torch.sqrt` to each Tensor of the input list. + """ + ... +@overload +def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_sub(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> Tuple[Tensor, ...]: ... +@overload +def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalars: Sequence[Union[Number, _complex]]) -> None: ... +@overload +def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], other: Union[Tuple[Tensor, ...], List[Tensor]], *, alpha: Union[Number, _complex] = 1) -> None: ... +@overload +def _foreach_sub_(self: Union[Tuple[Tensor, ...], List[Tensor]], scalar: Union[Number, _complex]) -> None: ... 
+def _foreach_tan(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_tan(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.tan` to each Tensor of the input list. + """ + ... +def _foreach_tan_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_tan_(self: List[Tensor]) -> None + + Apply :func:`torch.tan` to each Tensor of the input list. + """ + ... +def _foreach_tanh(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_tanh(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.tanh` to each Tensor of the input list. + """ + ... +def _foreach_tanh_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_tanh_(self: List[Tensor]) -> None + + Apply :func:`torch.tanh` to each Tensor of the input list. + """ + ... +def _foreach_trunc(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + _foreach_trunc(self: List[Tensor]) -> List[Tensor] + + Apply :func:`torch.trunc` to each Tensor of the input list. + """ + ... +def _foreach_trunc_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_trunc_(self: List[Tensor]) -> None + + Apply :func:`torch.trunc` to each Tensor of the input list. + """ + ... +def _foreach_zero_(self: Union[Tuple[Tensor, ...], List[Tensor]]) -> None: + r""" + _foreach_zero_(self: List[Tensor]) -> None + + Apply :func:`torch.zero` to each Tensor of the input list. + """ + ... +def _from_functional_tensor(t: Tensor) -> Tensor: ... +def _functional_assert_async(input: Tensor, assert_msg: str, dep_token: Tensor) -> Tensor: ... +def _functional_assert_scalar(self: Union[Number, _complex], assert_msg: str, dep_token: Tensor) -> Tensor: ... +def _functional_sym_constrain_range(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ... +def _functional_sym_constrain_range_for_size(size: Union[Number, _complex], min: Optional[_int], max: Optional[_int], dep_token: Tensor) -> Tensor: ... +def _functionalize_are_all_mutations_hidden_from_autograd(t: Tensor) -> _bool: ... +def _functionalize_are_all_mutations_under_no_grad_or_inference_mode(t: Tensor) -> _bool: ... +def _functionalize_commit_update(t: Tensor) -> None: ... +def _functionalize_mark_mutation_hidden_from_autograd(t: Tensor) -> None: ... +def _functionalize_replace(self_: Tensor, other: Tensor) -> None: ... +def _functionalize_sync(t: Tensor) -> None: ... +@overload +def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... 
+@overload +def _fused_adam_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +@overload +def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: Tensor, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +@overload +def _fused_adamw_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], exp_avgs: Union[Tuple[Tensor, ...], List[Tensor]], exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], max_exp_avg_sqs: Union[Tuple[Tensor, ...], List[Tensor]], state_steps: Union[Tuple[Tensor, ...], List[Tensor]], *, lr: _float, beta1: _float, beta2: _float, weight_decay: _float, eps: _float, amsgrad: _bool, maximize: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +def _fused_dropout(input: Tensor, p: _float, generator: Optional[Generator] = None) -> Tuple[Tensor, Tensor]: ... +def _fused_moving_avg_obs_fq_helper(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> torch.return_types._fused_moving_avg_obs_fq_helper: ... +def _fused_sdp_choice(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> _int: ... +@overload +def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: Tensor, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +@overload +def _fused_sgd_(self: Union[Tuple[Tensor, ...], List[Tensor]], grads: Union[Tuple[Tensor, ...], List[Tensor]], momentum_buffer_list: Union[Tuple[Tensor, ...], List[Tensor]], *, weight_decay: _float, momentum: _float, lr: _float, dampening: _float, nesterov: _bool, maximize: _bool, is_first_step: _bool, grad_scale: Optional[Tensor] = None, found_inf: Optional[Tensor] = None) -> None: ... +def _fw_primal_copy(input: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _grid_sampler_2d_cpu_fallback(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def _has_compatible_shallow_copy_type(input: Tensor, from_: Tensor) -> _bool: ... 
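+# Sketch of one fused SGD step through the keyword-only signature declared above;
+# roughly what torch.optim.SGD(..., fused=True) dispatches to internally. The
+# concrete tensors are arbitrary, and it assumes a build/device where the fused
+# kernel is available.
+import torch
+
+params = [torch.randn(4) for _ in range(2)]
+grads = [torch.randn(4) for _ in range(2)]
+momentum_bufs = [torch.zeros(4) for _ in range(2)]
+torch._fused_sgd_(
+    params, grads, momentum_bufs,
+    weight_decay=0.0, momentum=0.9, lr=0.1, dampening=0.0,
+    nesterov=False, maximize=False, is_first_step=True,
+)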
+def _histogramdd_bin_edges(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tuple[Tensor, ...]: ... +def _histogramdd_from_bin_cts(input: Tensor, bins: _size, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ... +def _histogramdd_from_bin_tensors(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], *, weight: Optional[Tensor] = None, density: _bool = False) -> Tensor: ... +def _index_put_impl_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False, unsafe: _bool = False) -> Tensor: ... +def _indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _int_mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _is_all_true(input: Tensor) -> Tensor: ... +def _is_any_true(input: Tensor) -> Tensor: ... +def _is_functional_tensor(t: Tensor) -> _bool: ... +def _is_zerotensor(input: Tensor) -> _bool: ... +def _lazy_clone(input: Tensor) -> Tensor: ... +def _linalg_check_errors(info: Tensor, api_name: str, *, is_matrix: _bool) -> None: ... +def _linalg_det(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_det: ... +def _linalg_eigh(A: Tensor, UPLO: str = "L", compute_v: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_eigh: ... +def _linalg_slogdet(A: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_slogdet: ... +def _linalg_solve_ex(A: Tensor, B: Tensor, *, left: _bool = True, check_errors: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_solve_ex: ... +def _linalg_svd(A: Tensor, full_matrices: _bool = False, compute_uv: _bool = True, *, driver: Optional[str] = None, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types._linalg_svd: ... +def _log_softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: ... +def _logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _lstm_mps(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: ... +def _lu_with_info(input: Tensor, pivot: _bool = True, check_errors: _bool = True) -> torch.return_types._lu_with_info: ... +def _make_dep_token(*, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def _make_dual(primal: Tensor, tangent: Tensor, level: _int) -> Tensor: ... +def _make_dual_copy(primal: Tensor, tangent: Tensor, level: _int, *, out: Optional[Tensor] = None) -> Tensor: ... +def _make_per_channel_quantized_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int) -> Tensor: ... 
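+# Illustrative sketch for two of the private helpers above; they return the same
+# named-tuple results that back the corresponding public torch / torch.linalg calls.
+import torch
+
+A = torch.randn(3, 3, dtype=torch.float64)
+U, S, Vh = torch._linalg_svd(A, full_matrices=True)  # same factors as torch.linalg.svd(A)
+lu, pivots, info = torch._lu_with_info(A)            # info == 0 signals a successful factorization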
+def _make_per_tensor_quantized_tensor(input: Tensor, scale: _float, zero_point: _int) -> Tensor: ... +def _masked_scale(input: Tensor, mask: Tensor, scale: _float) -> Tensor: ... +def _masked_softmax(input: Tensor, mask: Tensor, dim: Optional[_int] = None, mask_type: Optional[_int] = None) -> Tensor: ... +def _mixed_dtypes_linear(input: Tensor, weight: Tensor, scale: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None) -> Tensor: ... +def _mkldnn_reshape(input: Tensor, shape: _size) -> Tensor: ... +def _mkldnn_transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ... +def _mkldnn_transpose_(input: Tensor, dim0: _int, dim1: _int) -> Tensor: ... +def _mps_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def _mps_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +@overload +def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ... +@overload +def _native_batch_norm_legit(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ... +def _native_batch_norm_legit_no_training(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Tensor, running_var: Tensor, momentum: _float, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +def _native_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None, need_weights: _bool = True, average_attn_weights: _bool = True, mask_type: Optional[_int] = None) -> Tuple[Tensor, Tensor]: ... +def _neg_view(input: Tensor) -> Tensor: ... +def _neg_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _nested_from_padded(padded: Tensor, cpu_nested_shape_example: Tensor, fuse_transform_0213: _bool = False) -> Tensor: ... +def _nested_from_padded_and_nested_example(padded: Tensor, nt_example: Tensor) -> Tensor: ... +def _nested_get_jagged_dummy(any: Tensor) -> Tensor: ... +def _nested_get_lengths(input: Tensor) -> Tensor: ... +def _nested_get_offsets(input: Tensor) -> Tensor: ... +def _nested_get_ragged_idx(input: Tensor) -> _int: ... +def _nested_get_values(input: Tensor) -> Tensor: ... +def _nested_get_values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _nested_tensor_from_mask(t: Tensor, mask: Tensor, mask_check: _bool = True) -> Tensor: ... +def _nested_tensor_from_mask_left_aligned(t: Tensor, mask: Tensor) -> _bool: ... +def _nested_tensor_from_tensor_list(list: Union[Tuple[Tensor, ...], List[Tensor]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = None) -> Tensor: ... 
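+# Sketch for _neg_view above: it creates a lazily negated alias of a tensor, with
+# the negation tracked as a view bit rather than materialized into new storage.
+import torch
+
+x = torch.randn(3)
+v = torch._neg_view(x)       # shares storage with x but reads back as -x
+print(v.is_neg())            # True: the negation is recorded on the view
+print(torch.equal(v, -x))    # True: values match an eager negation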
+def _nested_tensor_softmax_with_shape(input: Tensor, query: Tensor) -> Tensor: ... +def _nested_view_from_buffer(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor) -> Tensor: ... +def _nested_view_from_buffer_copy(input: Tensor, nested_size: Tensor, nested_strides: Tensor, offsets: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _nested_view_from_jagged(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1) -> Tensor: ... +def _nested_view_from_jagged_copy(input: Tensor, offsets: Tensor, dummy: Tensor, lengths: Optional[Tensor] = None, ragged_idx: _int = 1, *, out: Optional[Tensor] = None) -> Tensor: ... +def _nnpack_available() -> _bool: ... +def _nnpack_spatial_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def _pack_padded_sequence(input: Tensor, lengths: Tensor, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def _pad_packed_sequence(data: Tensor, batch_sizes: Tensor, batch_first: _bool, padding_value: Union[Number, _complex], total_length: _int) -> Tuple[Tensor, Tensor]: ... +def _pin_memory(input: Tensor, device: Optional[Optional[DeviceLikeType]] = None) -> Tensor: ... +def _prelu_kernel(input: Tensor, weight: Tensor) -> Tensor: ... +def _print(s: str) -> None: ... +def _propagate_xla_data(input: Tensor, output: Tensor) -> None: ... +def _remove_batch_dim(input: Tensor, level: _int, batch_size: _int, out_dim: _int) -> Tensor: ... +def _reshape_alias_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: ... +def _reshape_from_tensor(input: Tensor, shape: Tensor) -> Tensor: ... +def _resize_output_(input: Tensor, size: Sequence[Union[_int, SymInt]], device: Optional[DeviceLikeType]) -> Tensor: ... +def _rowwise_prune(weight: Tensor, mask: Tensor, compressed_indices_dtype: _dtype) -> Tuple[Tensor, Tensor]: ... +def _sample_dirichlet(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +def _saturate_weight_to_fp16(weight: Tensor) -> Tensor: ... +def _scaled_dot_product_attention_math(query: Tensor, key: Tensor, value: Tensor, attn_mask: Optional[Tensor] = None, dropout_p: _float = 0.0, is_causal: _bool = False, dropout_mask: Optional[Tensor] = None, *, scale: Optional[_float] = None) -> Tuple[Tensor, Tensor]: ... +def _scaled_dot_product_cudnn_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_cudnn_attention: ... +def _scaled_dot_product_efficient_attention(query: Tensor, key: Tensor, value: Tensor, attn_bias: Optional[Tensor], compute_log_sumexp: _bool, dropout_p: _float = 0.0, is_causal: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_efficient_attention: ... +def _scaled_dot_product_flash_attention(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, return_debug_mask: _bool = False, *, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention: ... 
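+# Sketch for the attention helpers above: _scaled_dot_product_attention_math is the
+# pure-PyTorch reference path behind torch.nn.functional.scaled_dot_product_attention
+# and additionally returns the attention weights. Shapes are an arbitrary
+# (batch, heads, seq, head_dim) example.
+import torch
+import torch.nn.functional as F
+
+q = k = v = torch.randn(1, 2, 5, 8)
+out, attn = torch._scaled_dot_product_attention_math(q, k, v, is_causal=True)
+ref = F.scaled_dot_product_attention(q, k, v, is_causal=True)
+print(torch.allclose(out, ref, atol=1e-6))  # the dispatched and math paths agree numerically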
+def _scaled_dot_product_flash_attention_for_cpu(query: Tensor, key: Tensor, value: Tensor, dropout_p: _float = 0.0, is_causal: _bool = False, *, attn_mask: Optional[Tensor] = None, scale: Optional[_float] = None) -> torch.return_types._scaled_dot_product_flash_attention_for_cpu: ... +def _scaled_mm(input: Tensor, mat2: Tensor, *, bias: Optional[Tensor] = None, out_dtype: Optional[_dtype] = None, scale_a: Optional[Tensor] = None, scale_b: Optional[Tensor] = None, scale_result: Optional[Tensor] = None, use_fast_accum: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor]: ... +def _shape_as_tensor(input: Tensor) -> Tensor: ... +def _sobol_engine_draw(quasi: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int, dtype: Optional[_dtype]) -> Tuple[Tensor, Tensor]: ... +def _sobol_engine_ff_(input: Tensor, n: _int, sobolstate: Tensor, dimension: _int, num_generated: _int) -> Tensor: ... +def _sobol_engine_initialize_state_(input: Tensor, dimension: _int) -> Tensor: ... +def _sobol_engine_scramble_(input: Tensor, ltm: Tensor, dimension: _int) -> Tensor: ... +def _softmax(input: Tensor, dim: _int, half_to_float: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def _softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input_dtype: _dtype, *, grad_input: Optional[Tensor] = None) -> Tensor: ... +def _sparse_broadcast_to(input: Tensor, size: _size) -> Tensor: ... +def _sparse_broadcast_to_copy(input: Tensor, size: _size, *, out: Optional[Tensor] = None) -> Tensor: ... +def _sparse_csr_prod(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... +def _sparse_csr_sum(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, dtype: Optional[_dtype] = None) -> Tensor: ... +def _sparse_log_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ... +def _sparse_semi_structured_linear(input: Tensor, weight: Tensor, meta: Tensor, *, bias: Optional[Tensor] = None, activation: Optional[str] = None, out_dtype: Optional[_dtype] = None) -> Tensor: ... +def _sparse_softmax_backward_data(grad_output: Tensor, output: Tensor, dim: _int, input: Tensor) -> Tensor: ... +def _sparse_sparse_matmul(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor, *, dtype: _dtype) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor, dim: Union[_int, _size]) -> Tensor: ... +@overload +def _sparse_sum(input: Tensor, dim: Union[_int, _size], *, dtype: _dtype) -> Tensor: ... +def _stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: ... +def _standard_gamma(input: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +def _standard_gamma_grad(input: Tensor, output: Tensor) -> Tensor: ... +def _sync(t: Tensor) -> None: ... +@overload +def _test_autograd_multiple_dispatch(input: Tensor) -> Tensor: ... +@overload +def _test_autograd_multiple_dispatch(input: Tensor, b: _bool) -> Tensor: ... +def _test_autograd_multiple_dispatch_view(input: Tensor) -> Tensor: ... +def _test_autograd_multiple_dispatch_view_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def _test_check_tensor(input: Tensor) -> Tensor: ... +def _test_functorch_fallback(input: Tensor, other: Tensor) -> Tensor: ... 
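+# Small sketch for a few of the helpers above; behaviour is inferred from the
+# signatures in this file and the public ops they back, so treat it as illustrative.
+import torch
+
+x = torch.randn(2, 3)
+print(torch._shape_as_tensor(x))    # tensor([2, 3])
+probs = torch._softmax(x, 1, False) # same result as torch.softmax(x, dim=1)
+s = torch.tensor([[0.0, 2.0], [3.0, 0.0]]).to_sparse()
+print(torch._sparse_sum(s))         # dense scalar: sum over all stored values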
+def _test_parallel_materialize(input: Tensor, num_parallel: _int, skip_first: _bool = False) -> Tensor: ... +def _test_serialization_subcmul(input: Tensor, other: Tensor, alpha: Union[Number, _complex] = 1) -> Tensor: ... +def _to_cpu(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: ... +def _to_functional_tensor(t: Tensor) -> Tensor: ... +def _to_sparse_semi_structured(dense: Tensor) -> Tuple[Tensor, Tensor]: ... +def _transform_bias_rescale_qkv(qkv: Tensor, qkv_bias: Tensor, num_heads: _int) -> Tuple[Tensor, Tensor, Tensor]: ... +def _transformer_encoder_layer_fwd(src: Tensor, embed_dim: _int, num_heads: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, use_gelu: _bool, norm_first: _bool, eps: _float, norm_weight_1: Tensor, norm_bias_1: Tensor, norm_weight_2: Tensor, norm_bias_2: Tensor, ffn_weight_1: Tensor, ffn_bias_1: Tensor, ffn_weight_2: Tensor, ffn_bias_2: Tensor, mask: Optional[Tensor] = None, mask_type: Optional[_int] = None) -> Tensor: ... +def _trilinear(i1: Tensor, i2: Tensor, i3: Tensor, expand1: _size, expand2: _size, expand3: _size, sumdim: _size, unroll_dim: _int = 1) -> Tensor: ... +def _triton_multi_head_attention(query: Tensor, key: Tensor, value: Tensor, embed_dim: _int, num_head: _int, qkv_weight: Tensor, qkv_bias: Tensor, proj_weight: Tensor, proj_bias: Tensor, mask: Optional[Tensor] = None) -> Tensor: ... +def _triton_scaled_dot_attention(q: Tensor, k: Tensor, v: Tensor, dropout_p: _float = 0.0) -> Tensor: ... +def _unique(input: Tensor, sorted: _bool = True, return_inverse: _bool = False) -> Tuple[Tensor, Tensor]: ... +def _unique2(input: Tensor, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ... +def _unpack_dual(dual: Tensor, level: _int) -> torch.return_types._unpack_dual: ... +def _unsafe_index(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]]) -> Tensor: ... +def _unsafe_index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... +@overload +def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int) -> _bool: ... +@overload +def _use_cudnn_ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int) -> _bool: ... +def _use_cudnn_rnn_flatten_weight() -> _bool: ... +def _validate_compressed_sparse_indices(is_crow: _bool, compressed_idx: Tensor, plain_idx: Tensor, cdim: _int, dim: _int, nnz: _int) -> None: ... +def _validate_sparse_bsc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _validate_sparse_bsr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _validate_sparse_compressed_tensor_args(compressed_indices: Tensor, plain_indices: Tensor, values: Tensor, size: _size, layout: _layout) -> None: ... +def _validate_sparse_coo_tensor_args(indices: Tensor, values: Tensor, size: _size, is_coalesced: Optional[_bool] = None) -> None: ... +def _validate_sparse_csc_tensor_args(ccol_indices: Tensor, row_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _validate_sparse_csr_tensor_args(crow_indices: Tensor, col_indices: Tensor, values: Tensor, size: _size) -> None: ... +def _values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... 
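+# Sketch for _unique2 above: it is the raw kernel behind torch.unique and always
+# returns the (values, inverse, counts) triple, whereas the public wrapper only
+# returns what was requested.
+import torch
+
+x = torch.tensor([1, 3, 2, 3, 1])
+values, inverse, counts = torch._unique2(x, sorted=True, return_inverse=True, return_counts=True)
+# values -> tensor([1, 2, 3]); inverse maps each element of x to its index in values;
+# counts -> tensor([2, 1, 2])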
+def _weight_int4pack_mm(input: Tensor, mat2: Tensor, qGroupSize: _int, qScaleAndZeros: Tensor) -> Tensor: ... +def _weight_int8pack_mm(input: Tensor, mat2: Tensor, scales: Tensor) -> Tensor: ... +def _weight_norm(v: Tensor, g: Tensor, dim: _int = 0) -> Tensor: ... +def _weight_norm_interface(v: Tensor, g: Tensor, dim: _int = 0) -> Tuple[Tensor, Tensor]: ... +def abs(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + abs(input, *, out=None) -> Tensor + + Computes the absolute value of each element in :attr:`input`. + + .. math:: + \text{out}_{i} = |\text{input}_{i}| + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.abs(torch.tensor([-1, -2, 3])) + tensor([ 1, 2, 3]) + """ + ... +def abs_(input: Tensor) -> Tensor: ... +def absolute(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + absolute(input, *, out=None) -> Tensor + + Alias for :func:`torch.abs` + """ + ... +def acos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + acos(input, *, out=None) -> Tensor + + Computes the inverse cosine of each element in :attr:`input`. + + .. math:: + \text{out}_{i} = \cos^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.3348, -0.5889, 0.2005, -0.1584]) + >>> torch.acos(a) + tensor([ 1.2294, 2.2004, 1.3690, 1.7298]) + """ + ... +def acos_(input: Tensor) -> Tensor: ... +def acosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + acosh(input, *, out=None) -> Tensor + + Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \cosh^{-1}(\text{input}_{i}) + + Note: + The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range + will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`. + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4).uniform_(1, 2) + >>> a + tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ]) + >>> torch.acosh(a) + tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ]) + """ + ... +def acosh_(input: Tensor) -> Tensor: ... +def adaptive_avg_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tensor: ... +def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size]) -> Tuple[Tensor, Tensor]: ... +@overload +def add(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + add(input, other, *, alpha=1, out=None) -> Tensor + + Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to add to :attr:`input`. + + Keyword arguments: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. 
+ + Examples:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) + >>> torch.add(a, 20) + tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) + + >>> b = torch.randn(4) + >>> b + tensor([-0.9732, -0.3497, 0.6245, 0.4022]) + >>> c = torch.randn(4, 1) + >>> c + tensor([[ 0.3743], + [-1.7724], + [-0.5811], + [-0.8017]]) + >>> torch.add(b, c, alpha=10) + tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], + [-18.6971, -18.0736, -17.0994, -17.3216], + [ -6.7845, -6.1610, -5.1868, -5.4090], + [ -8.9902, -8.3667, -7.3925, -7.6147]]) + """ + ... +@overload +def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + add(input, other, *, alpha=1, out=None) -> Tensor + + Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to add to :attr:`input`. + + Keyword arguments: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Examples:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) + >>> torch.add(a, 20) + tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) + + >>> b = torch.randn(4) + >>> b + tensor([-0.9732, -0.3497, 0.6245, 0.4022]) + >>> c = torch.randn(4, 1) + >>> c + tensor([[ 0.3743], + [-1.7724], + [-0.5811], + [-0.8017]]) + >>> torch.add(b, c, alpha=10) + tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], + [-18.6971, -18.0736, -17.0994, -17.3216], + [ -6.7845, -6.1610, -5.1868, -5.4090], + [ -8.9902, -8.3667, -7.3925, -7.6147]]) + """ + ... +@overload +def add(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor: + r""" + add(input, other, *, alpha=1, out=None) -> Tensor + + Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to add to :attr:`input`. + + Keyword arguments: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Examples:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) + >>> torch.add(a, 20) + tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) + + >>> b = torch.randn(4) + >>> b + tensor([-0.9732, -0.3497, 0.6245, 0.4022]) + >>> c = torch.randn(4, 1) + >>> c + tensor([[ 0.3743], + [-1.7724], + [-0.5811], + [-0.8017]]) + >>> torch.add(b, c, alpha=10) + tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], + [-18.6971, -18.0736, -17.0994, -17.3216], + [ -6.7845, -6.1610, -5.1868, -5.4090], + [ -8.9902, -8.3667, -7.3925, -7.6147]]) + """ + ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. 
+ + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. 
+ + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: + r""" + addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored + in :attr:`batch1` and :attr:`batch2`, + with a reduced add step (all matrix multiplications get accumulated + along the first dimension). + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the + same number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + .. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` + must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) + """ + ... +@overload +def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor: + r""" + addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, + multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`. + + .. 
warning:: + Integer division with addcdiv is no longer supported, and in a future + release addcdiv will perform a true division of tensor1 and tensor2. + The historic addcdiv behavior can be implemented as + (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) + for integer inputs and as (input + value * tensor1 / tensor2) for float inputs. + The future addcdiv behavior is just the latter implementation: + (input + value * tensor1 / tensor2), for all dtypes. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i} + + + The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the numerator tensor + tensor2 (Tensor): the denominator tensor + + Keyword args: + value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcdiv(t, t1, t2, value=0.1) + tensor([[-0.2312, -3.6496, 0.1312], + [-1.0428, 3.4292, -0.1030], + [-0.5369, -0.9829, 0.0430]]) + """ + ... +@overload +def addcdiv(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: + r""" + addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, + multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`. + + .. warning:: + Integer division with addcdiv is no longer supported, and in a future + release addcdiv will perform a true division of tensor1 and tensor2. + The historic addcdiv behavior can be implemented as + (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) + for integer inputs and as (input + value * tensor1 / tensor2) for float inputs. + The future addcdiv behavior is just the latter implementation: + (input + value * tensor1 / tensor2), for all dtypes. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i} + + + The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the numerator tensor + tensor2 (Tensor): the denominator tensor + + Keyword args: + value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcdiv(t, t1, t2, value=0.1) + tensor([[-0.2312, -3.6496, 0.1312], + [-1.0428, 3.4292, -0.1030], + [-0.5369, -0.9829, 0.0430]]) + """ + ... +@overload +def addcdiv(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, + multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`. + + .. 
warning:: + Integer division with addcdiv is no longer supported, and in a future + release addcdiv will perform a true division of tensor1 and tensor2. + The historic addcdiv behavior can be implemented as + (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) + for integer inputs and as (input + value * tensor1 / tensor2) for float inputs. + The future addcdiv behavior is just the latter implementation: + (input + value * tensor1 / tensor2), for all dtypes. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i} + + + The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the numerator tensor + tensor2 (Tensor): the denominator tensor + + Keyword args: + value (Number, optional): multiplier for :math:`\text{tensor1} / \text{tensor2}` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcdiv(t, t1, t2, value=0.1) + tensor([[-0.2312, -3.6496, 0.1312], + [-1.0428, 3.4292, -0.1030], + [-0.5369, -0.9829, 0.0430]]) + """ + ... +@overload +def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor) -> Tensor: + r""" + addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise multiplication of :attr:`tensor1` + by :attr:`tensor2`, multiplies the result by the scalar :attr:`value` + and adds it to :attr:`input`. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i + + The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the tensor to be multiplied + tensor2 (Tensor): the tensor to be multiplied + + Keyword args: + value (Number, optional): multiplier for :math:`tensor1 .* tensor2` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcmul(t, t1, t2, value=0.1) + tensor([[-0.8635, -0.6391, 1.6174], + [-0.7617, -0.5879, 1.7388], + [-0.8353, -0.6249, 1.6511]]) + """ + ... +@overload +def addcmul(self: Tensor, value: Union[Number, _complex], tensor1: Tensor, tensor2: Tensor, *, out: Tensor) -> Tensor: + r""" + addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise multiplication of :attr:`tensor1` + by :attr:`tensor2`, multiplies the result by the scalar :attr:`value` + and adds it to :attr:`input`. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i + + The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the tensor to be multiplied + tensor2 (Tensor): the tensor to be multiplied + + Keyword args: + value (Number, optional): multiplier for :math:`tensor1 .* tensor2` + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcmul(t, t1, t2, value=0.1) + tensor([[-0.8635, -0.6391, 1.6174], + [-0.7617, -0.5879, 1.7388], + [-0.8353, -0.6249, 1.6511]]) + """ + ... +@overload +def addcmul(input: Tensor, tensor1: Tensor, tensor2: Tensor, *, value: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + + Performs the element-wise multiplication of :attr:`tensor1` + by :attr:`tensor2`, multiplies the result by the scalar :attr:`value` + and adds it to :attr:`input`. + + .. math:: + \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i + + The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be + :ref:`broadcastable `. + + For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be + a real number, otherwise an integer. + + Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the tensor to be multiplied + tensor2 (Tensor): the tensor to be multiplied + + Keyword args: + value (Number, optional): multiplier for :math:`tensor1 .* tensor2` + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcmul(t, t1, t2, value=0.1) + tensor([[-0.8635, -0.6391, 1.6174], + [-0.7617, -0.5879, 1.7388], + [-0.8353, -0.6249, 1.6511]]) + """ + ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. 
+ + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. 
+ + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. 
+ + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor, *, out: Tensor) -> Tensor: + r""" + addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. + The matrix :attr:`input` is added to the final result. + + If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a + :math:`(m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a :math:`(n \times p)` tensor + and :attr:`out` will be a :math:`(n \times p)` tensor. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operation has support for arguments with :ref:`sparse layouts`. If + :attr:`input` is sparse the result will have the same layout and if :attr:`out` + is provided it must have the same layout as :attr:`input`. + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) + """ + ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. 
+ + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. + + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. 
+ + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. + + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. + + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor, *, out: Tensor) -> Tensor: + r""" + addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`mat` and + the vector :attr:`vec`. + The vector :attr:`input` is added to the final result. + + If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a 1-D tensor of size `n` and + :attr:`out` will be 1-D tensor of size `n`. 
+ + :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between + :attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) + """ + ... +@overload +def addmv_(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat: Tensor, vec: Tensor) -> Tensor: ... +@overload +def addmv_(input: Tensor, mat: Tensor, vec: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def addmv_(beta: Union[Number, _complex], self: Tensor, mat: Tensor, vec: Tensor) -> Tensor: ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. + + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. 
math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. + + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +@overload +def addr(input: Tensor, vec1: Tensor, vec2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. + + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. 
+ + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. + + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +@overload +def addr(beta: Union[Number, _complex], self: Tensor, vec1: Tensor, vec2: Tensor, *, out: Tensor) -> Tensor: + r""" + addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` + and adds it to the matrix :attr:`input`. + + Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the + outer product between :attr:`vec1` and :attr:`vec2` and the added matrix + :attr:`input` respectively. + + .. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector + of size `m`, then :attr:`input` must be + :ref:`broadcastable ` with a matrix of size + :math:`(n \times m)` and :attr:`out` will be a matrix of size + :math:`(n \times m)`. + + Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{vec1} \otimes \text{vec2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) + """ + ... +def adjoint(input: Tensor) -> Tensor: + r""" + adjoint(Tensor) -> Tensor + Returns a view of the tensor conjugated and with the last two dimensions transposed. + + ``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and + to ``x.transpose(-2, -1)`` for real tensors. + + Example:: + >>> x = torch.arange(4, dtype=torch.float) + >>> A = torch.complex(x, x).reshape(2, 2) + >>> A + tensor([[0.+0.j, 1.+1.j], + [2.+2.j, 3.+3.j]]) + >>> A.adjoint() + tensor([[0.-0.j, 2.-2.j], + [1.-1.j, 3.-3.j]]) + >>> (A.adjoint() == A.mH).all() + tensor(True) + """ + ... +def affine_grid_generator(theta: Tensor, size: Sequence[Union[_int, SymInt]], align_corners: _bool) -> Tensor: ... +def alias_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.alias`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def all(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + all(input) -> Tensor + + Tests if all elements in :attr:`input` evaluate to `True`. + + .. 
note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + + .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) + """ + ... +@overload +def all(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + all(input) -> Tensor + + Tests if all elements in :attr:`input` evaluate to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + + .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) + """ + ... +@overload +def all(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + all(input) -> Tensor + + Tests if all elements in :attr:`input` evaluate to `True`. + + .. 
note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + + .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) + """ + ... +@overload +def all(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + all(input) -> Tensor + + Tests if all elements in :attr:`input` evaluate to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + + .. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) + """ + ... 
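+# [Editor's note — illustrative sketch, not part of the upstream stub file. The
+# `all` overloads above reduce either the whole tensor or a given `dim`; the
+# commented snippet below (assuming a standard `torch` install) shows how `dim`
+# and `keepdim` interact on a small deterministic input.]
+#
+#     import torch
+#     x = torch.tensor([[True, True],
+#                       [True, False]])
+#     torch.all(x)                        # tensor(False)   -- full reduction
+#     torch.all(x, dim=1)                 # tensor([ True, False])
+#     torch.all(x, dim=1, keepdim=True)   # tensor([[ True],
+#                                         #         [False]])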
+def allclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> _bool: + r""" + allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool + + This function checks if :attr:`input` and :attr:`other` satisfy the condition: + + .. math:: + \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert + + elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to + `numpy.allclose `_ + + Args: + input (Tensor): first tensor to compare + other (Tensor): second tensor to compare + atol (float, optional): absolute tolerance. Default: 1e-08 + rtol (float, optional): relative tolerance. Default: 1e-05 + equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` + + Example:: + + >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08])) + False + >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09])) + True + >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')])) + False + >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True) + True + """ + ... +def alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def amax(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + amax(input, dim, keepdim=False, *, out=None) -> Tensor + + Returns the maximum value of each slice of the :attr:`input` tensor in the given + dimension(s) :attr:`dim`. + + .. note:: + The difference between ``max``/``min`` and ``amax``/``amin`` is: + - ``amax``/``amin`` supports reducing on multiple dimensions, + - ``amax``/``amin`` does not return indices, + - ``amax``/``amin`` evenly distributes gradient between equal values, + while ``max(dim)``/``min(dim)`` propagates gradient only to a single + index in the source tensor. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.8177, 1.4878, -0.2491, 0.9130], + [-0.7158, 1.1775, 2.0992, 0.4817], + [-0.0053, 0.0164, -1.3738, -0.0507], + [ 1.9700, 1.1106, -1.0318, -1.0816]]) + >>> torch.amax(a, 1) + tensor([1.4878, 2.0992, 0.0164, 1.9700]) + """ + ... +def amin(input: Tensor, dim: Union[_int, _size] = (), keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + amin(input, dim, keepdim=False, *, out=None) -> Tensor + + Returns the minimum value of each slice of the :attr:`input` tensor in the given + dimension(s) :attr:`dim`. + + .. 
note:: + The difference between ``max``/``min`` and ``amax``/``amin`` is: + - ``amax``/``amin`` supports reducing on multiple dimensions, + - ``amax``/``amin`` does not return indices, + - ``amax``/``amin`` evenly distributes gradient between equal values, + while ``max(dim)``/``min(dim)`` propagates gradient only to a single + index in the source tensor. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.6451, -0.4866, 0.2987, -1.3312], + [-0.5744, 1.2980, 1.8397, -0.2713], + [ 0.9128, 0.9214, -1.7268, -0.2995], + [ 0.9023, 0.4853, 0.9075, -1.6165]]) + >>> torch.amin(a, 1) + tensor([-1.3312, -0.5744, -1.7268, -1.6165]) + """ + ... +def aminmax(input: Tensor, *, dim: Optional[_int] = None, keepdim: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.aminmax: + r""" + aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max) + + Computes the minimum and maximum values of the :attr:`input` tensor. + + Args: + input (Tensor): + The input tensor + + Keyword Args: + dim (Optional[int]): + The dimension along which to compute the values. If `None`, + computes the values over the entire :attr:`input` tensor. + Default is `None`. + keepdim (bool): + If `True`, the reduced dimensions will be kept in the output + tensor as dimensions with size 1 for broadcasting, otherwise + they will be removed, as if calling (:func:`torch.squeeze`). + Default is `False`. + out (Optional[Tuple[Tensor, Tensor]]): + Optional tensors on which to write the result. Must have the same + shape and dtype as the expected output. + Default is `None`. + + Returns: + A named tuple `(min, max)` containing the minimum and maximum values. + + Raises: + RuntimeError + If any of the dimensions to compute the values over has size 0. + + .. note:: + NaN values are propagated to the output if at least one value is NaN. + + .. seealso:: + :func:`torch.amin` computes just the minimum value + :func:`torch.amax` computes just the maximum value + + Example:: + + >>> torch.aminmax(torch.tensor([1, -3, 5])) + torch.return_types.aminmax( + min=tensor(-3), + max=tensor(5)) + + >>> # aminmax propagates NaNs + >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan])) + torch.return_types.aminmax( + min=tensor(nan), + max=tensor(nan)) + + >>> t = torch.arange(10).view(2, 5) + >>> t + tensor([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> t.aminmax(dim=0, keepdim=True) + torch.return_types.aminmax( + min=tensor([[0, 1, 2, 3, 4]]), + max=tensor([[5, 6, 7, 8, 9]])) + """ + ... +def angle(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + angle(input, *, out=None) -> Tensor + + Computes the element-wise angle (in radians) of the given :attr:`input` tensor. + + .. math:: + \text{out}_{i} = angle(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + .. 
note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers, + zero for non-negative real numbers, and propagates NaNs. Previously + the function would return zero for all real numbers and not propagate + floating-point NaNs. + + Example:: + + >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159 + tensor([ 135., 135, -45]) + """ + ... +@overload +def any(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + any(input) -> Tensor + + Tests if any element in :attr:`input` evaluates to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + + .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if any element in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) + """ + ... +@overload +def any(input: Tensor, dim: Optional[_size] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + any(input) -> Tensor + + Tests if any element in :attr:`input` evaluates to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + + .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if any element in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) + """ + ... +@overload +def any(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + any(input) -> Tensor + + Tests if any element in :attr:`input` evaluates to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + + .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if any element in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) + """ + ... +@overload +def any(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + any(input) -> Tensor + + Tests if any element in :attr:`input` evaluates to `True`. + + .. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + + Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + + .. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + + For each row of :attr:`input` in the given dimension :attr:`dim`, + returns `True` if any element in the row evaluate to `True` and `False` otherwise. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) + """ + ... +@overload +def arange(start: Number, end: Number, step: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(start: Number, end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. 
+ end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(end: Number, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(end: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(start: Union[Number, _complex], end: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. 
math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +@overload +def arange(start: Union[Number, _complex], end: Union[Number, _complex], step: Union[Number, _complex] = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` + with values from the interval ``[start, end)`` taken with common difference + :attr:`step` beginning from `start`. + + Note that non-integer :attr:`step` is subject to floating point rounding errors when + comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` + in such cases. + + .. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} + + Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. 
+ Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) + """ + ... +def arccos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arccos(input, *, out=None) -> Tensor + + Alias for :func:`torch.acos`. + """ + ... +def arccos_(input: Tensor) -> Tensor: ... +def arccosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arccosh(input, *, out=None) -> Tensor + + Alias for :func:`torch.acosh`. + """ + ... +def arccosh_(input: Tensor) -> Tensor: ... +def arcsin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arcsin(input, *, out=None) -> Tensor + + Alias for :func:`torch.asin`. + """ + ... +def arcsin_(input: Tensor) -> Tensor: ... +def arcsinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arcsinh(input, *, out=None) -> Tensor + + Alias for :func:`torch.asinh`. + """ + ... +def arcsinh_(input: Tensor) -> Tensor: ... +def arctan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arctan(input, *, out=None) -> Tensor + + Alias for :func:`torch.atan`. + """ + ... +def arctan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arctan2(input, other, *, out=None) -> Tensor + Alias for :func:`torch.atan2`. + """ + ... +def arctan_(input: Tensor) -> Tensor: ... +def arctanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + arctanh(input, *, out=None) -> Tensor + + Alias for :func:`torch.atanh`. + """ + ... +def arctanh_(input: Tensor) -> Tensor: ... +def argmax(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + argmax(input) -> LongTensor + + Returns the indices of the maximum value of all elements in the :attr:`input` tensor. + + This is the second value returned by :meth:`torch.max`. See its + documentation for the exact semantics of this method. + + .. note:: If there are multiple maximal values then the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], + [-0.7401, -0.8805, -0.3402, -1.1936], + [ 0.4907, -1.3948, -1.0691, -0.3132], + [-1.6092, 0.5419, -0.2993, 0.3195]]) + >>> torch.argmax(a) + tensor(0) + + .. function:: argmax(input, dim, keepdim=False) -> LongTensor + :noindex: + + Returns the indices of the maximum values of a tensor across a dimension. + + This is the second value returned by :meth:`torch.max`. See its + documentation for the exact semantics of this method. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. If ``None``, the argmax of the flattened input is returned. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], + [-0.7401, -0.8805, -0.3402, -1.1936], + [ 0.4907, -1.3948, -1.0691, -0.3132], + [-1.6092, 0.5419, -0.2993, 0.3195]]) + >>> torch.argmax(a, dim=1) + tensor([ 0, 2, 0, 1]) + """ + ... 
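+# [Editor's note — illustrative sketch, not part of the upstream stub file. The
+# `arange` and `argmax` stubs above document dtype inference and flattened vs.
+# per-dimension reduction; a small deterministic check, assuming a standard
+# `torch` install:]
+#
+#     import torch
+#     torch.arange(5).dtype                  # torch.int64 (all-integer arguments)
+#     torch.arange(0., 5).dtype              # the default float dtype (a float argument)
+#     t = torch.tensor([[1, 5, 3],
+#                       [7, 2, 9]])
+#     torch.argmax(t)                        # tensor(5)  -- index into the flattened tensor
+#     torch.argmax(t, dim=1)                 # tensor([1, 2])
+#     torch.argmax(t, dim=0, keepdim=True)   # tensor([[1, 0, 1]])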
+def argmin(input: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + argmin(input, dim=None, keepdim=False) -> LongTensor + + Returns the indices of the minimum value(s) of the flattened tensor or along a dimension + + This is the second value returned by :meth:`torch.min`. See its + documentation for the exact semantics of this method. + + .. note:: If there are multiple minimal values then the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. If ``None``, the argmin of the flattened input is returned. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.1139, 0.2254, -0.1381, 0.3687], + [ 1.0100, -1.1975, -0.0102, -0.4732], + [-0.9240, 0.1207, -0.7506, -1.0213], + [ 1.7809, -1.2960, 0.9384, 0.1438]]) + >>> torch.argmin(a) + tensor(13) + >>> torch.argmin(a, dim=1) + tensor([ 2, 1, 3, 1]) + >>> torch.argmin(a, dim=1, keepdim=True) + tensor([[2], + [1], + [3], + [1]]) + """ + ... +@overload +def argsort(input: Tensor, *, stable: _bool, dim: _int = -1, descending: _bool = False) -> Tensor: + r""" + argsort(input, dim=-1, descending=False, stable=False) -> Tensor + + Returns the indices that sort a tensor along a given dimension in ascending + order by value. + + This is the second value returned by :meth:`torch.sort`. See its documentation + for the exact semantics of this method. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. If ``False``, the relative order of values + which compare equal is not guaranteed. ``True`` is slower. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): controls the relative order of equivalent elements + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0785, 1.5267, -0.8521, 0.4065], + [ 0.1598, 0.0788, -0.0745, -1.2700], + [ 1.2208, 1.0722, -0.7064, 1.2564], + [ 0.0669, -0.2318, -0.8229, -0.9280]]) + + + >>> torch.argsort(a, dim=1) + tensor([[2, 0, 3, 1], + [3, 2, 1, 0], + [2, 1, 0, 3], + [3, 2, 1, 0]]) + """ + ... +@overload +def argsort(input: Tensor, dim: _int = -1, descending: _bool = False) -> Tensor: + r""" + argsort(input, dim=-1, descending=False, stable=False) -> Tensor + + Returns the indices that sort a tensor along a given dimension in ascending + order by value. + + This is the second value returned by :meth:`torch.sort`. See its documentation + for the exact semantics of this method. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. If ``False``, the relative order of values + which compare equal is not guaranteed. ``True`` is slower. + + Args: + input (Tensor): the input tensor. 
+ dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): controls the relative order of equivalent elements + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0785, 1.5267, -0.8521, 0.4065], + [ 0.1598, 0.0788, -0.0745, -1.2700], + [ 1.2208, 1.0722, -0.7064, 1.2564], + [ 0.0669, -0.2318, -0.8229, -0.9280]]) + + + >>> torch.argsort(a, dim=1) + tensor([[2, 0, 3, 1], + [3, 2, 1, 0], + [2, 1, 0, 3], + [3, 2, 1, 0]]) + """ + ... +@overload +def argsort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False) -> Tensor: + r""" + argsort(input, dim=-1, descending=False, stable=False) -> Tensor + + Returns the indices that sort a tensor along a given dimension in ascending + order by value. + + This is the second value returned by :meth:`torch.sort`. See its documentation + for the exact semantics of this method. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. If ``False``, the relative order of values + which compare equal is not guaranteed. ``True`` is slower. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): controls the relative order of equivalent elements + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0785, 1.5267, -0.8521, 0.4065], + [ 0.1598, 0.0788, -0.0745, -1.2700], + [ 1.2208, 1.0722, -0.7064, 1.2564], + [ 0.0669, -0.2318, -0.8229, -0.9280]]) + + + >>> torch.argsort(a, dim=1) + tensor([[2, 0, 3, 1], + [3, 2, 1, 0], + [2, 1, 0, 3], + [3, 2, 1, 0]]) + """ + ... +def argwhere(input: Tensor) -> Tensor: + r""" + argwhere(input) -> Tensor + + Returns a tensor containing the indices of all non-zero elements of + :attr:`input`. Each row in the result contains the indices of a non-zero + element in :attr:`input`. The result is sorted lexicographically, with + the last index changing the fastest (C-style). + + If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor + :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + .. note:: + This function is similar to NumPy's `argwhere`. + + When :attr:`input` is on CUDA, this function causes host-device synchronization. + + Args: + {input} + + Example:: + + >>> t = torch.tensor([1, 0, 1]) + >>> torch.argwhere(t) + tensor([[0], + [2]]) + >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]]) + >>> torch.argwhere(t) + tensor([[0, 0], + [0, 2], + [1, 1], + [1, 2]]) + """ + ... +def as_strided(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + as_strided(input, size, stride, storage_offset=None) -> Tensor + + Create a view of an existing `torch.Tensor` :attr:`input` with specified + :attr:`size`, :attr:`stride` and :attr:`storage_offset`. + + .. warning:: + Prefer using other view functions, like :meth:`torch.Tensor.expand`, + to setting a view's strides manually with `as_strided`, as this + function's behavior depends on the implementation of a tensor's storage. 
+ The constructed view of the storage must only refer to elements within + the storage or a runtime error will be thrown, and if the view is + "overlapped" (with multiple indices referring to the same element in + memory) its behavior is undefined. + + Args: + input (Tensor): the input tensor. + size (tuple or ints): the shape of the output tensor + stride (tuple or ints): the stride of the output tensor + storage_offset (int, optional): the offset in the underlying storage of the output tensor. + If ``None``, the storage_offset of the output tensor will match the input tensor. + + Example:: + + >>> x = torch.randn(3, 3) + >>> x + tensor([[ 0.9039, 0.6291, 1.0795], + [ 0.1586, 2.1939, -0.4900], + [-0.1909, -0.7503, 1.9355]]) + >>> t = torch.as_strided(x, (2, 2), (1, 2)) + >>> t + tensor([[0.9039, 1.0795], + [0.6291, 0.1586]]) + >>> t = torch.as_strided(x, (2, 2), (1, 2), 1) + tensor([[0.6291, 0.1586], + [1.0795, 2.1939]]) + """ + ... +def as_strided_(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: ... +def as_strided_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.as_strided`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def as_strided_scatter(input: Tensor, src: Tensor, size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], storage_offset: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor + + Embeds the values of the :attr:`src` tensor into :attr:`input` along + the elements corresponding to the result of calling + input.as_strided(size, stride, storage_offset). + + This function returns a tensor with fresh storage; it does not + return a view. + + Args: + input (Tensor): the input tensor. + size (tuple or ints): the shape of the output tensor + stride (tuple or ints): the stride of the output tensor + storage_offset (int, optional): the offset in the underlying storage of the output tensor + + .. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + `torch.as_strided(input, size, stride, storage_offset)` + + Example:: + + >>> a = torch.arange(4).reshape(2, 2) + 1 + >>> a + tensor([[1, 2], + [3, 4]]) + >>> b = torch.zeros(3, 3) + >>> b + tensor([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2)) + tensor([[1., 3., 2.], + [4., 0., 0.], + [0., 0., 0.]]) + """ + ... +def as_tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None) -> Tensor: + r""" + as_tensor(data, dtype=None, device=None) -> Tensor + + Converts :attr:`data` into a tensor, sharing data and preserving autograd + history if possible. + + If :attr:`data` is already a tensor with the requested dtype and device + then :attr:`data` itself is returned, but if :attr:`data` is a + tensor with a different dtype or device then it's copied as if using + `data.to(dtype=dtype, device=device)`. + + If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a + tensor is constructed using :func:`torch.from_numpy`. + + .. 
seealso:: + + :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`). + + + Args: + data (array_like): Initial data for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, infers data type from :attr:`data`. + device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor + then the device of data is used. If None and data is not a tensor then + the result tensor is constructed on the current device. + + + Example:: + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.as_tensor(a) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.as_tensor(a, device=torch.device('cuda')) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([1, 2, 3]) + """ + ... +def asarray(obj: Any, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, copy: Optional[_bool] = None, requires_grad: _bool = False) -> Tensor: + r""" + asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor + + Converts :attr:`obj` to a tensor. + + :attr:`obj` can be one of: + + 1. a tensor + 2. a NumPy array or a NumPy scalar + 3. a DLPack capsule + 4. an object that implements Python's buffer protocol + 5. a scalar + 6. a sequence of scalars + + When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will, + by default, not require a gradient, have the same datatype as :attr:`obj`, be on the + same device, and share memory with it. These properties can be controlled with the + :attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments. + If the returned tensor is of a different datatype, on a different device, or a copy is + requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad` + is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is + also a tensor with an autograd history then the returned tensor will have the same history. + + When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's + buffer protocol then the buffer is interpreted as an array of bytes grouped according to + the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is + passed then the default floating point datatype is used, instead.) The returned tensor + will have the specified datatype (or default floating point datatype if none is specified) + and, by default, be on the CPU device and share memory with the buffer. + + When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on + the CPU and that doesn't share its memory (i.e. ``copy=True``). By default datatype will + be the PyTorch datatype corresponding to the NumPy's scalar's datatype. + + When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the + returned tensor will, by default, infer its datatype from the scalar values, be on the + current default device, and not share its memory. + + .. seealso:: + + :func:`torch.tensor` creates a tensor that always copies the data from the input object. + :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays. + :func:`torch.frombuffer` creates a tensor that always shares memory from objects that + implement the buffer protocol. 
+ :func:`torch.from_dlpack` creates a tensor that always shares memory from + DLPack capsules. + + Args: + obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's + buffer protocol, scalar, or sequence of scalars. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor. + Default: ``None``, which causes the datatype of the returned tensor to be + inferred from :attr:`obj`. + copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`. + Default: ``None``, which causes the returned tensor to share memory with :attr:`obj` + whenever possible. If ``True`` then the returned tensor does not share its memory. + If ``False`` then the returned tensor shares its memory with :attr:`obj` and an + error is thrown if it cannot. + device (:class:`torch.device`, optional): the device of the returned tensor. + Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if + :attr:`obj` is a Python sequence, the current default device will be used. + requires_grad (bool, optional): whether the returned tensor requires grad. + Default: ``False``, which causes the returned tensor not to require a gradient. + If ``True``, then the returned tensor will require a gradient, and if :attr:`obj` + is also a tensor with an autograd history then the returned tensor will have + the same history. + + Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> # Shares memory with tensor 'a' + >>> b = torch.asarray(a) + >>> a.data_ptr() == b.data_ptr() + True + >>> # Forces memory copy + >>> c = torch.asarray(a, copy=True) + >>> a.data_ptr() == c.data_ptr() + False + + >>> a = torch.tensor([1., 2., 3.], requires_grad=True) + >>> b = a + 2 + >>> b + tensor([3., 4., 5.], grad_fn=) + >>> # Shares memory with tensor 'b', with no grad + >>> c = torch.asarray(b) + >>> c + tensor([3., 4., 5.]) + >>> # Shares memory with tensor 'b', retaining autograd history + >>> d = torch.asarray(b, requires_grad=True) + >>> d + tensor([3., 4., 5.], grad_fn=) + + >>> array = numpy.array([1, 2, 3]) + >>> # Shares memory with array 'array' + >>> t1 = torch.asarray(array) + >>> array.__array_interface__['data'][0] == t1.data_ptr() + True + >>> # Copies memory due to dtype mismatch + >>> t2 = torch.asarray(array, dtype=torch.float32) + >>> array.__array_interface__['data'][0] == t2.data_ptr() + False + + >>> scalar = numpy.float64(0.5) + >>> torch.asarray(scalar) + tensor(0.5000, dtype=torch.float64) + """ + ... +def asin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + asin(input, *, out=None) -> Tensor + + Returns a new tensor with the arcsine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sin^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5962, 1.4985, -0.4396, 1.4525]) + >>> torch.asin(a) + tensor([-0.6387, nan, -0.4552, nan]) + """ + ... +def asin_(input: Tensor) -> Tensor: ... +def asinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + asinh(input, *, out=None) -> Tensor + + Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sinh^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ]) + >>> torch.asinh(a) + tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ]) + """ + ... +def asinh_(input: Tensor) -> Tensor: ... +def atan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + atan(input, *, out=None) -> Tensor + + Returns a new tensor with the arctangent of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \tan^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.2341, 0.2539, -0.6256, -0.6448]) + >>> torch.atan(a) + tensor([ 0.2299, 0.2487, -0.5591, -0.5727]) + """ + ... +def atan2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + atan2(input, other, *, out=None) -> Tensor + + Element-wise arctangent of :math:`\text{input}_{i} / \text{other}_{i}` + with consideration of the quadrant. Returns a new tensor with the signed angles + in radians between vector :math:`(\text{other}_{i}, \text{input}_{i})` + and vector :math:`(1, 0)`. (Note that :math:`\text{other}_{i}`, the second + parameter, is the x-coordinate, while :math:`\text{input}_{i}`, the first + parameter, is the y-coordinate.) + + The shapes of ``input`` and ``other`` must be + :ref:`broadcastable `. + + Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.9041, 0.0196, -0.3108, -2.4423]) + >>> torch.atan2(a, torch.randn(4)) + tensor([ 0.9833, 0.0811, -1.9743, -1.4151]) + """ + ... +def atan_(input: Tensor) -> Tensor: ... +def atanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + atanh(input, *, out=None) -> Tensor + + Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`. + + Note: + The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range + will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is + mapped to `+/-INF` respectively. + + .. math:: + \text{out}_{i} = \tanh^{-1}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4).uniform_(-1, 1) + >>> a + tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ]) + >>> torch.atanh(a) + tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ]) + """ + ... +def atanh_(input: Tensor) -> Tensor: ... +def avg_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, ceil_mode: _bool = False, count_include_pad: _bool = True) -> Tensor: ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. 
+ + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... +@overload +def baddbmm(input: Tensor, batch1: Tensor, batch2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. 
+ + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... +@overload +def baddbmm(beta: Union[Number, _complex], self: Tensor, batch1: Tensor, batch2: Tensor, *, out: Tensor) -> Tensor: + r""" + baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices in :attr:`batch1` + and :attr:`batch2`. + :attr:`input` is added to the final result. + + :attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same + number of matrices. + + If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a + :math:`(b \times m \times p)` tensor, then :attr:`input` must be + :ref:`broadcastable ` with a + :math:`(b \times n \times p)` tensor and :attr:`out` will be a + :math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the + same as the scaling factors used in :meth:`torch.addbmm`. + + .. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + + If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in + it will not be propagated. + + For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and + :attr:`alpha` must be real numbers, otherwise they should be integers. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{batch1} \mathbin{@} \text{batch2}` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + + Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) + """ + ... +@overload +def bartlett_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Bartlett window function. + + .. math:: + w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases} + \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\ + 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\ + \end{cases}, + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. 
:attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.bartlett_window(L, periodic=True)`` equal to + ``torch.bartlett_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +@overload +def bartlett_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + bartlett_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Bartlett window function. + + .. math:: + w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases} + \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\ + 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\ + \end{cases}, + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.bartlett_window(L, periodic=True)`` equal to + ``torch.bartlett_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. 
Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +def batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ... +def batch_norm_backward_elemt(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], sum_dy: Tensor, sum_dy_xmu: Tensor, count: Tensor) -> Tensor: ... +def batch_norm_backward_reduce(grad_out: Tensor, input: Tensor, mean: Tensor, invstd: Tensor, weight: Optional[Tensor], input_g: _bool, weight_g: _bool, bias_g: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def batch_norm_elemt(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, invstd: Tensor, eps: _float, *, out: Optional[Tensor] = None) -> Tensor: ... +def batch_norm_gather_stats(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, count: _int) -> Tuple[Tensor, Tensor]: ... +def batch_norm_gather_stats_with_counts(input: Tensor, mean: Tensor, invstd: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float, eps: _float, counts: Tensor) -> Tuple[Tensor, Tensor]: ... +def batch_norm_stats(input: Tensor, eps: _float) -> Tuple[Tensor, Tensor]: ... +def batch_norm_update_stats(input: Tensor, running_mean: Optional[Tensor], running_var: Optional[Tensor], momentum: _float) -> Tuple[Tensor, Tensor]: ... +@overload +def bernoulli(input: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + bernoulli(input, *, generator=None, out=None) -> Tensor + + Draws binary random numbers (0 or 1) from a Bernoulli distribution. + + The :attr:`input` tensor should be a tensor containing probabilities + to be used for drawing the binary random number. + Hence, all values in :attr:`input` have to be in the range: + :math:`0 \leq \text{input}_i \leq 1`. + + The :math:`\text{i}^{th}` element of the output tensor will draw a + value :math:`1` according to the :math:`\text{i}^{th}` probability value given + in :attr:`input`. + + .. math:: + \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i}) + + The returned :attr:`out` tensor only has values 0 or 1 and is of the same + shape as :attr:`input`. + + :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating + point ``dtype``. + + Args: + input (Tensor): the input tensor of probability values for the Bernoulli distribution + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1] + >>> a + tensor([[ 0.1737, 0.0950, 0.3609], + [ 0.7148, 0.0289, 0.2676], + [ 0.9456, 0.8937, 0.7202]]) + >>> torch.bernoulli(a) + tensor([[ 1., 0., 0.], + [ 0., 0., 0.], + [ 1., 1., 1.]]) + + >>> a = torch.ones(3, 3) # probability of drawing "1" is 1 + >>> torch.bernoulli(a) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]]) + >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0 + >>> torch.bernoulli(a) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.], + [ 0., 0., 0.]]) + """ + ... +@overload +def bernoulli(input: Tensor, p: _float, *, generator: Optional[Generator] = None) -> Tensor: + r""" + bernoulli(input, *, generator=None, out=None) -> Tensor + + Draws binary random numbers (0 or 1) from a Bernoulli distribution. + + The :attr:`input` tensor should be a tensor containing probabilities + to be used for drawing the binary random number. + Hence, all values in :attr:`input` have to be in the range: + :math:`0 \leq \text{input}_i \leq 1`. + + The :math:`\text{i}^{th}` element of the output tensor will draw a + value :math:`1` according to the :math:`\text{i}^{th}` probability value given + in :attr:`input`. + + .. math:: + \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i}) + + The returned :attr:`out` tensor only has values 0 or 1 and is of the same + shape as :attr:`input`. + + :attr:`out` can have integral ``dtype``, but :attr:`input` must have floating + point ``dtype``. + + Args: + input (Tensor): the input tensor of probability values for the Bernoulli distribution + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1] + >>> a + tensor([[ 0.1737, 0.0950, 0.3609], + [ 0.7148, 0.0289, 0.2676], + [ 0.9456, 0.8937, 0.7202]]) + >>> torch.bernoulli(a) + tensor([[ 1., 0., 0.], + [ 0., 0., 0.], + [ 1., 1., 1.]]) + + >>> a = torch.ones(3, 3) # probability of drawing "1" is 1 + >>> torch.bernoulli(a) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]]) + >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0 + >>> torch.bernoulli(a) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.], + [ 0., 0., 0.]]) + """ + ... +def bilinear(input1: Tensor, input2: Tensor, weight: Tensor, bias: Optional[Tensor] = None) -> Tensor: ... +def binary_cross_entropy_with_logits(input: Tensor, target: Tensor, weight: Optional[Tensor] = None, pos_weight: Optional[Tensor] = None, reduction: _int = 1) -> Tensor: ... +def bincount(input: Tensor, weights: Optional[Tensor] = None, minlength: _int = 0) -> Tensor: + r""" + bincount(input, weights=None, minlength=0) -> Tensor + + Count the frequency of each value in an array of non-negative ints. + + The number of bins (size 1) is one larger than the largest value in + :attr:`input` unless :attr:`input` is empty, in which case the result is a + tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least + :attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size + :attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``, + ``out[n] += weights[i]`` if :attr:`weights` is specified else + ``out[n] += 1``. + + Note: + This operation may produce nondeterministic gradients when given tensors on a CUDA device. See :doc:`/notes/randomness` for more information. 
+ + Arguments: + input (Tensor): 1-d int tensor + weights (Tensor): optional, weight for each value in the input tensor. + Should be of same size as input tensor. + minlength (int): optional, minimum number of bins. Should be non-negative. + + Returns: + output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if + :attr:`input` is non-empty, else ``Size(0)`` + + Example:: + + >>> input = torch.randint(0, 8, (5,), dtype=torch.int64) + >>> weights = torch.linspace(0, 1, steps=5) + >>> input, weights + (tensor([4, 3, 6, 3, 4]), + tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + + >>> torch.bincount(input) + tensor([0, 0, 0, 2, 2, 0, 1]) + + >>> input.bincount(weights) + tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000]) + """ + ... +def binomial(count: Tensor, prob: Tensor, generator: Optional[Generator] = None) -> Tensor: ... +@overload +def bitwise_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_and(input, other, *, out=None) -> Tensor + + Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical AND. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([1, 0, 3], dtype=torch.int8) + >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ False, True, False]) + """ + ... +@overload +def bitwise_and(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_and(input, other, *, out=None) -> Tensor + + Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical AND. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([1, 0, 3], dtype=torch.int8) + >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ False, True, False]) + """ + ... +@overload +def bitwise_and(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_and(input, other, *, out=None) -> Tensor + + Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical AND. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([1, 0, 3], dtype=torch.int8) + >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ False, True, False]) + """ + ... +@overload +def bitwise_left_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_left_shift(input, other, *, out=None) -> Tensor + + Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. 
This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i << \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 24], dtype=torch.int8) + """ + ... +@overload +def bitwise_left_shift(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_left_shift(input, other, *, out=None) -> Tensor + + Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i << \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 24], dtype=torch.int8) + """ + ... +@overload +def bitwise_left_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_left_shift(input, other, *, out=None) -> Tensor + + Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i << \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 24], dtype=torch.int8) + """ + ... +def bitwise_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_not(input, *, out=None) -> Tensor + + Computes the bitwise NOT of the given input tensor. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical NOT. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8)) + tensor([ 0, 1, -4], dtype=torch.int8) + """ + ... +@overload +def bitwise_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_or(input, other, *, out=None) -> Tensor + + Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical OR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -2, 3], dtype=torch.int8) + >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, True, False]) + """ + ... 
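The shift docstrings above note that these operators follow the usual broadcasting and type-promotion rules. A minimal sketch, assuming a small illustrative tensor that is not part of the stub::

    import torch

    x = torch.tensor([1, 2, 4], dtype=torch.int16)
    # A Python-int shift amount broadcasts over the tensor, and the integral
    # dtype of `x` is preserved in the result.
    print(torch.bitwise_left_shift(x, 2))           # -> tensor([ 4,  8, 16], dtype=torch.int16)
    # For integral inputs, shifting left by n is the same as multiplying by 2**n.
    print(torch.bitwise_left_shift(x, 2) == x * 4)  # -> tensor([True, True, True])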
+@overload +def bitwise_or(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_or(input, other, *, out=None) -> Tensor + + Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical OR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -2, 3], dtype=torch.int8) + >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, True, False]) + """ + ... +@overload +def bitwise_or(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_or(input, other, *, out=None) -> Tensor + + Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical OR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -2, 3], dtype=torch.int8) + >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, True, False]) + """ + ... +@overload +def bitwise_right_shift(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_right_shift(input, other, *, out=None) -> Tensor + + Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + In any case, if the value of the right operand is negative or is greater + or equal to the number of bits in the promoted left operand, the behavior is undefined. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i >> \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -7, 3], dtype=torch.int8) + """ + ... +@overload +def bitwise_right_shift(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_right_shift(input, other, *, out=None) -> Tensor + + Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + In any case, if the value of the right operand is negative or is greater + or equal to the number of bits in the promoted left operand, the behavior is undefined. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i >> \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -7, 3], dtype=torch.int8) + """ + ... 
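The right-shift docstring above describes an arithmetic shift on integral inputs. A minimal sketch reusing the docstring's sample tensor; the comparison with :func:`torch.div` is an editor's illustration, not part of the stub::

    import torch

    x = torch.tensor([-2, -7, 31], dtype=torch.int8)
    # An arithmetic right shift of a signed integer by n bits matches floor
    # division by 2**n, e.g. -7 >> 1 == -4.
    print(torch.bitwise_right_shift(x, 1))          # -> tensor([-1, -4, 15], dtype=torch.int8)
    print(torch.div(x, 2, rounding_mode='floor'))   # same values, same dtype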
+@overload +def bitwise_right_shift(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_right_shift(input, other, *, out=None) -> Tensor + + Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits. + The input tensor must be of integral type. This operator supports + :ref:`broadcasting to a common shape ` and + :ref:`type promotion `. + In any case, if the value of the right operand is negative or is greater + or equal to the number of bits in the promoted left operand, the behavior is undefined. + + The operation applied is: + + .. math:: + \text{out}_i = \text{input}_i >> \text{other}_i + + Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -7, 3], dtype=torch.int8) + """ + ... +@overload +def bitwise_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_xor(input, other, *, out=None) -> Tensor + + Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical XOR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 0], dtype=torch.int8) + >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, False, False]) + """ + ... +@overload +def bitwise_xor(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + bitwise_xor(input, other, *, out=None) -> Tensor + + Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical XOR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 0], dtype=torch.int8) + >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, False, False]) + """ + ... +@overload +def bitwise_xor(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + bitwise_xor(input, other, *, out=None) -> Tensor + + Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of + integral or Boolean types. For bool tensors, it computes the logical XOR. + + Args: + input: the first input tensor + other: the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 0], dtype=torch.int8) + >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, False, False]) + """ + ... 
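For bool tensors the XOR above is the logical exclusive-or, which makes it convenient for comparing masks. A minimal sketch, assuming two illustrative masks that are not part of the stub::

    import torch

    a = torch.tensor([True, True, False])
    b = torch.tensor([False, True, False])
    # XOR is True exactly where the two masks disagree (their symmetric difference).
    print(torch.bitwise_xor(a, b))  # -> tensor([ True, False, False])
    # The ^ operator on tensors dispatches to the same operation.
    print(a ^ b)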
+@overload +def blackman_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Blackman window function. + + .. math:: + w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right) + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.blackman_window(L, periodic=True)`` equal to + ``torch.blackman_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +@overload +def blackman_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + blackman_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Blackman window function. + + .. math:: + w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right) + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.blackman_window(L, periodic=True)`` equal to + ``torch.blackman_window(L + 1, periodic=False)[:-1])``. + + .. 
note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +def bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + bmm(input, mat2, *, out=None) -> Tensor + + Performs a batch matrix-matrix product of matrices stored in :attr:`input` + and :attr:`mat2`. + + :attr:`input` and :attr:`mat2` must be 3-D tensors each containing + the same number of matrices. + + If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a + :math:`(b \times m \times p)` tensor, :attr:`out` will be a + :math:`(b \times n \times p)` tensor. + + .. math:: + \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + .. note:: This function does not :ref:`broadcast `. + For broadcasting matrix products, see :func:`torch.matmul`. + + Args: + input (Tensor): the first batch of matrices to be multiplied + mat2 (Tensor): the second batch of matrices to be multiplied + + Keyword Args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> input = torch.randn(10, 3, 4) + >>> mat2 = torch.randn(10, 4, 5) + >>> res = torch.bmm(input, mat2) + >>> res.size() + torch.Size([10, 3, 5]) + """ + ... +def broadcast_to(input: Tensor, size: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + broadcast_to(input, shape) -> Tensor + + Broadcasts :attr:`input` to the shape :attr:`\shape`. + Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details. + + Args: + input (Tensor): the input tensor. + shape (list, tuple, or :class:`torch.Size`): the new shape. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> torch.broadcast_to(x, (3, 3)) + tensor([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) + """ + ... +@overload +def bucketize(input: Tensor, boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor + + Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the + boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size + as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. 
Note that + this behavior is opposite the behavior of + `numpy.digitize `_. + More formally, the returned index satisfies the following rules: + + .. list-table:: + :widths: 15 85 + :header-rows: 1 + + * - :attr:`right` + - *returned index satisfies* + * - False + - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]`` + * - True + - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]`` + + Args: + input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined. + + Keyword args: + out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of :attr:`boundaries` (one pass the last index). + In other words, if False, gets the lower bound index for each value in :attr:`input` + from :attr:`boundaries`. If True, gets the upper bound index instead. + Default value is False. + out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided. + + + Example:: + + >>> boundaries = torch.tensor([1, 3, 5, 7, 9]) + >>> boundaries + tensor([1, 3, 5, 7, 9]) + >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> v + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.bucketize(v, boundaries) + tensor([[1, 3, 4], + [1, 3, 4]]) + >>> torch.bucketize(v, boundaries, right=True) + tensor([[2, 3, 5], + [2, 3, 5]]) + """ + ... +@overload +def bucketize(self: Union[Number, _complex], boundaries: Tensor, *, out_int32: _bool = False, right: _bool = False) -> Tensor: + r""" + bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor + + Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the + boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size + as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that + this behavior is opposite the behavior of + `numpy.digitize `_. + More formally, the returned index satisfies the following rules: + + .. list-table:: + :widths: 15 85 + :header-rows: 1 + + * - :attr:`right` + - *returned index satisfies* + * - False + - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]`` + * - True + - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]`` + + Args: + input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined. + + Keyword args: + out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of :attr:`boundaries` (one pass the last index). + In other words, if False, gets the lower bound index for each value in :attr:`input` + from :attr:`boundaries`. If True, gets the upper bound index instead. + Default value is False. 
+ out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided. + + + Example:: + + >>> boundaries = torch.tensor([1, 3, 5, 7, 9]) + >>> boundaries + tensor([1, 3, 5, 7, 9]) + >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> v + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.bucketize(v, boundaries) + tensor([[1, 3, 4], + [1, 3, 4]]) + >>> torch.bucketize(v, boundaries, right=True) + tensor([[2, 3, 5], + [2, 3, 5]]) + """ + ... +def can_cast(from_: _dtype, to: _dtype) -> _bool: + r""" + can_cast(from, to) -> bool + + Determines if a type conversion is allowed under PyTorch casting rules + described in the type promotion :ref:`documentation `. + + Args: + from (dtype): The original :class:`torch.dtype`. + to (dtype): The target :class:`torch.dtype`. + + Example:: + + >>> torch.can_cast(torch.double, torch.float) + True + >>> torch.can_cast(torch.float, torch.int) + False + """ + ... +@overload +def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cat(tensors, dim=0, *, out=None) -> Tensor + + Concatenates the given sequence of :attr:`seq` tensors in the given dimension. + All tensors must either have the same shape (except in the concatenating + dimension) or be a 1-D empty tensor with size ``(0,)``. + + :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split` + and :func:`torch.chunk`. + + :func:`torch.cat` can be best understood via examples. + + .. seealso:: + + :func:`torch.stack` concatenates the given sequence along a new dimension. + + Args: + tensors (sequence of Tensors): any python sequence of tensors of the same type. + Non-empty tensors provided must have the same shape, except in the + cat dimension. + dim (int, optional): the dimension over which the tensors are concatenated + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 0) + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 1) + tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580, + -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034, + -0.5790, 0.1497]]) + """ + ... +@overload +def cat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: + r""" + cat(tensors, dim=0, *, out=None) -> Tensor + + Concatenates the given sequence of :attr:`seq` tensors in the given dimension. + All tensors must either have the same shape (except in the concatenating + dimension) or be a 1-D empty tensor with size ``(0,)``. + + :func:`torch.cat` can be seen as an inverse operation for :func:`torch.split` + and :func:`torch.chunk`. + + :func:`torch.cat` can be best understood via examples. + + .. seealso:: + + :func:`torch.stack` concatenates the given sequence along a new dimension. + + Args: + tensors (sequence of Tensors): any python sequence of tensors of the same type. + Non-empty tensors provided must have the same shape, except in the + cat dimension. + dim (int, optional): the dimension over which the tensors are concatenated + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 0) + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 1) + tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580, + -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034, + -0.5790, 0.1497]]) + """ + ... +def ccol_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def ceil(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ceil(input, *, out=None) -> Tensor + + Returns a new tensor with the ceil of the elements of :attr:`input`, + the smallest integer greater than or equal to each element. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + + .. math:: + \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + >>> torch.ceil(a) + tensor([-0., -1., -1., 1.]) + """ + ... +def ceil_(input: Tensor) -> Tensor: ... +def celu(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ... +def celu_(input: Tensor, alpha: Union[Number, _complex] = 1.0) -> Tensor: ... +def channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ... +def cholesky(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cholesky(input, upper=False, *, out=None) -> Tensor + + Computes the Cholesky decomposition of a symmetric positive-definite + matrix :math:`A` or for batches of symmetric positive-definite matrices. + + If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and + the decomposition has the form: + + .. math:: + + A = U^TU + + If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and + the decomposition has the form: + + .. math:: + + A = LL^T + + If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite + matrices, then the returned tensor will be composed of upper-triangular Cholesky factors + of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned + tensor will be composed of lower-triangular Cholesky factors of each of the individual + matrices. + + .. warning:: + + :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky` + and will be removed in a future PyTorch release. + + ``L = torch.cholesky(A)`` should be replaced with + + .. code:: python + + L = torch.linalg.cholesky(A) + + ``U = torch.cholesky(A, upper=True)`` should be replaced with + + .. code:: python + + U = torch.linalg.cholesky(A).mH + + This transform will produce equivalent results for all valid (symmetric positive definite) inputs. + + Args: + input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more + batch dimensions consisting of symmetric positive-definite matrices. + upper (bool, optional): flag that indicates whether to return a + upper or lower triangular matrix. 
Default: ``False`` + + Keyword args: + out (Tensor, optional): the output matrix + + Example:: + + >>> a = torch.randn(3, 3) + >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite + >>> l = torch.cholesky(a) + >>> a + tensor([[ 2.4112, -0.7486, 1.4551], + [-0.7486, 1.3544, 0.1294], + [ 1.4551, 0.1294, 1.6724]]) + >>> l + tensor([[ 1.5528, 0.0000, 0.0000], + [-0.4821, 1.0592, 0.0000], + [ 0.9371, 0.5487, 0.7023]]) + >>> l @ l.mT + tensor([[ 2.4112, -0.7486, 1.4551], + [-0.7486, 1.3544, 0.1294], + [ 1.4551, 0.1294, 1.6724]]) + >>> a = torch.randn(3, 2, 2) # Example for batched input + >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite + >>> l = torch.cholesky(a) + >>> z = l @ l.mT + >>> torch.dist(z, a) + tensor(2.3842e-07) + """ + ... +def cholesky_inverse(input: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cholesky_inverse(L, upper=False, *, out=None) -> Tensor + + Computes the inverse of a complex Hermitian or real symmetric + positive-definite matrix given its Cholesky decomposition. + + Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, + and :math:`L` its Cholesky decomposition such that: + + .. math:: + + A = LL^{\text{H}} + + where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, + and the transpose when :math:`L` is real-valued. + + Computes the inverse matrix :math:`A^{-1}`. + + Supports input of float, double, cfloat and cdouble dtypes. + Also supports batches of matrices, and if :math:`A` is a batch of matrices + then the output has the same batch dimensions. + + Args: + L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of lower or upper triangular Cholesky decompositions of + symmetric or Hermitian positive-definite matrices. + upper (bool, optional): flag that indicates whether :math:`L` is lower triangular + or upper triangular. Default: ``False`` + + Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + + Example:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix + >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition + >>> torch.cholesky_inverse(L) + tensor([[ 1.9314, 1.2251, -0.0889], + [ 1.2251, 2.4439, 0.2122], + [-0.0889, 0.2122, 0.1412]]) + >>> A.inverse() + tensor([[ 1.9314, 1.2251, -0.0889], + [ 1.2251, 2.4439, 0.2122], + [-0.0889, 0.2122, 0.1412]]) + + >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) + >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices + >>> L = torch.linalg.cholesky(A) + >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L)) + tensor(5.6358e-7) + """ + ... +def cholesky_solve(input: Tensor, input2: Tensor, upper: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cholesky_solve(B, L, upper=False, *, out=None) -> Tensor + + Computes the solution of a system of linear equations with complex Hermitian + or real symmetric positive-definite lhs given its Cholesky decomposition. + + Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, + and :math:`L` its Cholesky decomposition such that: + + .. math:: + + A = LL^{\text{H}} + + where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, + and the transpose when :math:`L` is real-valued. + + Returns the solution :math:`X` of the following linear system: + + .. 
math:: + + AX = B + + Supports inputs of float, double, cfloat and cdouble dtypes. + Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices + then the output has the same batch dimensions. + + Args: + B (Tensor): right-hand side tensor of shape `(*, n, k)` + where :math:`*` is zero or more batch dimensions + L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of lower or upper triangular Cholesky decompositions of + symmetric or Hermitian positive-definite matrices. + upper (bool, optional): flag that indicates whether :math:`L` is lower triangular + or upper triangular. Default: ``False``. + + Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + + Example:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix + >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition + >>> B = torch.randn(3, 2) + >>> torch.cholesky_solve(B, L) + tensor([[ -8.1625, 19.6097], + [ -5.8398, 14.2387], + [ -4.3771, 10.4173]]) + >>> A.inverse() @ B + tensor([[ -8.1626, 19.6097], + [ -5.8398, 14.2387], + [ -4.3771, 10.4173]]) + + >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) + >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices + >>> L = torch.linalg.cholesky(A) + >>> B = torch.randn(2, 1, dtype=torch.complex64) + >>> X = torch.cholesky_solve(B, L) + >>> torch.dist(X, A.inverse() @ B) + tensor(1.6881e-5) + """ + ... +def choose_qparams_optimized(input: Tensor, numel: _int, n_bins: _int, ratio: _float, bit_width: _int) -> Tuple[Tensor, Tensor]: ... +def chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + chunk(input, chunks, dim=0) -> List of Tensors + + Attempts to split a tensor into the specified number of chunks. Each chunk is a view of + the input tensor. + + + .. note:: + + This function may return fewer than the specified number of chunks! + + .. seealso:: + + :func:`torch.tensor_split` a function that always returns exactly the specified number of chunks + + If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`, + all returned chunks will be the same size. + If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`, + all returned chunks will be the same size, except the last one. + If such division is not possible, this function may return fewer + than the specified number of chunks. + + Arguments: + input (Tensor): the tensor to split + chunks (int): number of chunks to return + dim (int): dimension along which to split the tensor + + Example: + >>> torch.arange(11).chunk(6) + (tensor([0, 1]), + tensor([2, 3]), + tensor([4, 5]), + tensor([6, 7]), + tensor([8, 9]), + tensor([10])) + >>> torch.arange(12).chunk(6) + (tensor([0, 1]), + tensor([2, 3]), + tensor([4, 5]), + tensor([6, 7]), + tensor([8, 9]), + tensor([10, 11])) + >>> torch.arange(13).chunk(6) + (tensor([0, 1, 2]), + tensor([3, 4, 5]), + tensor([6, 7, 8]), + tensor([ 9, 10, 11]), + tensor([12])) + """ + ... +@overload +def clamp(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + clamp(input, min=None, max=None, *, out=None) -> Tensor + + Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`. + Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns: + + .. 
math:: + y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i) + + If :attr:`min` is ``None``, there is no lower bound. + Or, if :attr:`max` is ``None`` there is no upper bound. + + + .. note:: + If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) ` + sets all elements in :attr:`input` to the value of :attr:`max`. + + Args: + input (Tensor): the input tensor. + min (Number or Tensor, optional): lower-bound of the range to be clamped to + max (Number or Tensor, optional): upper-bound of the range to be clamped to + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.7120, 0.1734, -0.0478, -0.0922]) + >>> torch.clamp(a, min=-0.5, max=0.5) + tensor([-0.5000, 0.1734, -0.0478, -0.0922]) + + >>> min = torch.linspace(-1, 1, steps=4) + >>> torch.clamp(a, min=min) + tensor([-1.0000, 0.1734, 0.3333, 1.0000]) + """ + ... +@overload +def clamp(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + clamp(input, min=None, max=None, *, out=None) -> Tensor + + Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`. + Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns: + + .. math:: + y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i) + + If :attr:`min` is ``None``, there is no lower bound. + Or, if :attr:`max` is ``None`` there is no upper bound. + + + .. note:: + If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) ` + sets all elements in :attr:`input` to the value of :attr:`max`. + + Args: + input (Tensor): the input tensor. + min (Number or Tensor, optional): lower-bound of the range to be clamped to + max (Number or Tensor, optional): upper-bound of the range to be clamped to + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.7120, 0.1734, -0.0478, -0.0922]) + >>> torch.clamp(a, min=-0.5, max=0.5) + tensor([-0.5000, 0.1734, -0.0478, -0.0922]) + + >>> min = torch.linspace(-1, 1, steps=4) + >>> torch.clamp(a, min=min) + tensor([-1.0000, 0.1734, 0.3333, 1.0000]) + """ + ... +@overload +def clamp_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... +@overload +def clamp_max(input: Tensor, max: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_max(input: Tensor, max: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_max_(input: Tensor, max: Tensor) -> Tensor: ... +@overload +def clamp_max_(input: Tensor, max: Union[Number, _complex]) -> Tensor: ... +@overload +def clamp_min(input: Tensor, min: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_min(input: Tensor, min: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def clamp_min_(input: Tensor, min: Tensor) -> Tensor: ... +@overload +def clamp_min_(input: Tensor, min: Union[Number, _complex]) -> Tensor: ... +@overload +def clip(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + clip(input, min=None, max=None, *, out=None) -> Tensor + + Alias for :func:`torch.clamp`. + """ + ... 
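As the ``clamp`` overloads above indicate (and ``clip`` is an alias), the bounds may be given either as Python numbers or as tensors that broadcast against ``input``. A small sketch with the results worked out by hand (illustrative only)::

    import torch

    x = torch.tensor([-2.0, 0.0, 2.0])
    # Scalar bounds clamp every element into [-1, 1]
    torch.clamp(x, min=-1.0, max=1.0)                    # tensor([-1.,  0.,  1.])
    # A tensor bound is applied elementwise: max(-2, 0)=0, max(0, -1)=0, max(2, 3)=3
    torch.clamp(x, min=torch.tensor([0.0, -1.0, 3.0]))   # tensor([0., 0., 3.])
    # torch.clip is an alias, so this is equivalent to the first call
    torch.clip(x, min=-1.0, max=1.0)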
+@overload +def clip(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + clip(input, min=None, max=None, *, out=None) -> Tensor + + Alias for :func:`torch.clamp`. + """ + ... +@overload +def clip_(input: Tensor, min: Optional[Tensor] = None, max: Optional[Tensor] = None) -> Tensor: ... +@overload +def clip_(input: Tensor, min: Optional[Union[Number, _complex]] = None, max: Optional[Union[Number, _complex]] = None) -> Tensor: ... +def clone(input: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: + r""" + clone(input, *, memory_format=torch.preserve_format) -> Tensor + + Returns a copy of :attr:`input`. + + .. note:: + + This function is differentiable, so gradients will flow back from the + result of this operation to :attr:`input`. To create a tensor without an + autograd relationship to :attr:`input` see :meth:`~Tensor.detach`. + + Args: + input (Tensor): the input tensor. + + Keyword args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned tensor. Default: ``torch.preserve_format``. + """ + ... +def col_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.col_indices`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def column_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + column_stack(tensors, *, out=None) -> Tensor + + Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`. + + Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t`` + in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally. + + Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.column_stack((a, b)) + tensor([[1, 4], + [2, 5], + [3, 6]]) + >>> a = torch.arange(5) + >>> b = torch.arange(10).reshape(5, 2) + >>> torch.column_stack((a, b, b)) + tensor([[0, 0, 1, 0, 1], + [1, 2, 3, 2, 3], + [2, 4, 5, 4, 5], + [3, 6, 7, 6, 7], + [4, 8, 9, 8, 9]]) + """ + ... +def combinations(input: Tensor, r: _int = 2, with_replacement: _bool = False) -> Tensor: + r""" + combinations(input, r=2, with_replacement=False) -> seq + + Compute combinations of length :math:`r` of the given tensor. The behavior is similar to + python's `itertools.combinations` when `with_replacement` is set to `False`, and + `itertools.combinations_with_replacement` when `with_replacement` is set to `True`. + + Arguments: + input (Tensor): 1D vector. + r (int, optional): number of elements to combine + with_replacement (bool, optional): whether to allow duplication in combination + + Returns: + Tensor: A tensor equivalent to converting all the input tensors into lists, do + `itertools.combinations` or `itertools.combinations_with_replacement` on these + lists, and finally convert the resulting list into tensor. 
+ + Example:: + + >>> a = [1, 2, 3] + >>> list(itertools.combinations(a, r=2)) + [(1, 2), (1, 3), (2, 3)] + >>> list(itertools.combinations(a, r=3)) + [(1, 2, 3)] + >>> list(itertools.combinations_with_replacement(a, r=2)) + [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)] + >>> tensor_a = torch.tensor(a) + >>> torch.combinations(tensor_a) + tensor([[1, 2], + [1, 3], + [2, 3]]) + >>> torch.combinations(tensor_a, r=3) + tensor([[1, 2, 3]]) + >>> torch.combinations(tensor_a, with_replacement=True) + tensor([[1, 1], + [1, 2], + [1, 3], + [2, 2], + [2, 3], + [3, 3]]) + """ + ... +def complex(real: Tensor, imag: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + complex(real, imag, *, out=None) -> Tensor + + Constructs a complex tensor with its real part equal to :attr:`real` and its + imaginary part equal to :attr:`imag`. + + Args: + real (Tensor): The real part of the complex tensor. Must be half, float or double. + imag (Tensor): The imaginary part of the complex tensor. Must be same dtype + as :attr:`real`. + + Keyword args: + out (Tensor): If the inputs are ``torch.float32``, must be + ``torch.complex64``. If the inputs are ``torch.float64``, must be + ``torch.complex128``. + + Example:: + + >>> real = torch.tensor([1, 2], dtype=torch.float32) + >>> imag = torch.tensor([3, 4], dtype=torch.float32) + >>> z = torch.complex(real, imag) + >>> z + tensor([(1.+3.j), (2.+4.j)]) + >>> z.dtype + torch.complex64 + """ + ... +@overload +def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + concat(tensors, dim=0, *, out=None) -> Tensor + + Alias of :func:`torch.cat`. + """ + ... +@overload +def concat(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: + r""" + concat(tensors, dim=0, *, out=None) -> Tensor + + Alias of :func:`torch.cat`. + """ + ... +@overload +def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + concatenate(tensors, axis=0, out=None) -> Tensor + + Alias of :func:`torch.cat`. + """ + ... +@overload +def concatenate(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: + r""" + concatenate(tensors, axis=0, out=None) -> Tensor + + Alias of :func:`torch.cat`. + """ + ... +def conj(input: Tensor) -> Tensor: + r""" + conj(input) -> Tensor + + Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype, + this function just returns :attr:`input`. + + .. note:: + :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized + at any time using :func:`torch.resolve_conj`. + + .. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of + non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` + when :attr:`input` is of non-complex dtype to be compatible with this change. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> x.is_conj() + False + >>> y = torch.conj(x) + >>> y.is_conj() + True + """ + ... +def conj_physical(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + conj_physical(input, *, out=None) -> Tensor + + Computes the element-wise conjugate of the given :attr:`input` tensor. 
+ If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`. + + .. note:: + This performs the conjugate operation regardless of the fact conjugate bit is set or not. + + .. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of + non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` + when :attr:`input` is of non-complex dtype to be compatible with this change. + + .. math:: + \text{out}_{i} = conj(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])) + tensor([-1 - 1j, -2 - 2j, 3 + 3j]) + """ + ... +def conj_physical_(input: Tensor) -> Tensor: ... +def constant_pad_nd(input: Tensor, pad: Sequence[Union[_int, SymInt]], value: Union[Number, _complex] = 0) -> Tensor: ... +@overload +def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +@overload +def conv3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: str = "valid", dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, groups: Union[_int, SymInt] = 1) -> Tensor: ... +def conv_tbc(input: Tensor, weight: Tensor, bias: Tensor, pad: _int = 0) -> Tensor: ... +def conv_transpose1d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... 
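The convolution stubs above mirror their ``torch.nn.functional`` counterparts; a brief shape check, with sizes chosen here purely for illustration::

    import torch

    x = torch.randn(1, 3, 8, 8)            # (batch, in_channels, H, W)
    w = torch.randn(6, 3, 3, 3)            # (out_channels, in_channels, kH, kW)
    torch.conv2d(x, w, padding=1).shape    # torch.Size([1, 6, 8, 8]); padding=1 preserves 8x8 for a 3x3 kernel

    xt = torch.randn(1, 4, 10)             # (batch, in_channels, L)
    wt = torch.randn(4, 2, 5)              # (in_channels, out_channels, kernel_size) for the transposed conv
    torch.conv_transpose1d(xt, wt, stride=2).shape
    # torch.Size([1, 2, 23]): with no padding, L_out = (L - 1) * stride + kernel_size = 9 * 2 + 5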
+def conv_transpose2d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def conv_transpose3d(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, stride: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1, padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, output_padding: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 0, groups: Union[_int, SymInt] = 1, dilation: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]] = 1) -> Tensor: ... +def convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], transposed: _bool, output_padding: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +@overload +def copysign(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + copysign(input, other, *, out=None) -> Tensor + + Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise. + + .. math:: + \text{out}_{i} = \begin{cases} + -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\ + |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\ + \end{cases} + + + Supports :ref:`broadcasting to a common shape `, + and integer and float inputs. + + Args: + input (Tensor): magnitudes. + other (Tensor or Number): contains value(s) whose signbit(s) are + applied to the magnitudes in :attr:`input`. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244]) + >>> torch.copysign(a, 1) + tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244]) + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.7079, 0.2778, -1.0249, 0.5719], + [-0.0059, -0.2600, -0.4475, -1.3948], + [ 0.3667, -0.9567, -2.5757, -0.1751], + [ 0.2046, -0.0742, 0.2998, -0.1054]]) + >>> b = torch.randn(4) + tensor([ 0.2373, 0.3120, 0.3190, -1.1128]) + >>> torch.copysign(a, b) + tensor([[ 0.7079, 0.2778, 1.0249, -0.5719], + [ 0.0059, 0.2600, 0.4475, -1.3948], + [ 0.3667, 0.9567, 2.5757, -0.1751], + [ 0.2046, 0.0742, 0.2998, -0.1054]]) + >>> a = torch.tensor([1.]) + >>> b = torch.tensor([-0.]) + >>> torch.copysign(a, b) + tensor([-1.]) + + .. note:: + copysign handles signed zeros. If the other argument has a negative zero (-0), + the corresponding output value will be negative. + """ + ... +@overload +def copysign(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + copysign(input, other, *, out=None) -> Tensor + + Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise. + + .. math:: + \text{out}_{i} = \begin{cases} + -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\ + |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\ + \end{cases} + + + Supports :ref:`broadcasting to a common shape `, + and integer and float inputs. + + Args: + input (Tensor): magnitudes. + other (Tensor or Number): contains value(s) whose signbit(s) are + applied to the magnitudes in :attr:`input`. 
+ + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244]) + >>> torch.copysign(a, 1) + tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244]) + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.7079, 0.2778, -1.0249, 0.5719], + [-0.0059, -0.2600, -0.4475, -1.3948], + [ 0.3667, -0.9567, -2.5757, -0.1751], + [ 0.2046, -0.0742, 0.2998, -0.1054]]) + >>> b = torch.randn(4) + tensor([ 0.2373, 0.3120, 0.3190, -1.1128]) + >>> torch.copysign(a, b) + tensor([[ 0.7079, 0.2778, 1.0249, -0.5719], + [ 0.0059, 0.2600, 0.4475, -1.3948], + [ 0.3667, 0.9567, 2.5757, -0.1751], + [ 0.2046, 0.0742, 0.2998, -0.1054]]) + >>> a = torch.tensor([1.]) + >>> b = torch.tensor([-0.]) + >>> torch.copysign(a, b) + tensor([-1.]) + + .. note:: + copysign handles signed zeros. If the other argument has a negative zero (-0), + the corresponding output value will be negative. + """ + ... +def corrcoef(input: Tensor) -> Tensor: + r""" + corrcoef(input) -> Tensor + + Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix, + where rows are the variables and columns are the observations. + + .. note:: + + The correlation coefficient matrix R is computed using the covariance matrix C as given by + :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }` + + .. note:: + + Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1. + The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation. + + Args: + input (Tensor): A 2D matrix containing multiple variables and observations, or a + Scalar or 1D vector representing a single variable. + + Returns: + (Tensor) The correlation coefficient matrix of the variables. + + .. seealso:: + + :func:`torch.cov` covariance matrix. + + Example:: + + >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]]) + >>> torch.corrcoef(x) + tensor([[ 1., -1.], + [-1., 1.]]) + >>> x = torch.randn(2, 4) + >>> x + tensor([[-0.2678, -0.0908, -0.3766, 0.2780], + [-0.5812, 0.1535, 0.2387, 0.2350]]) + >>> torch.corrcoef(x) + tensor([[1.0000, 0.3582], + [0.3582, 1.0000]]) + >>> torch.corrcoef(x[0]) + tensor(1.) + """ + ... +def cos(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cos(input, *, out=None) -> Tensor + + Returns a new tensor with the cosine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \cos(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 1.4309, 1.2706, -0.8562, 0.9796]) + >>> torch.cos(a) + tensor([ 0.1395, 0.2957, 0.6553, 0.5574]) + """ + ... +def cos_(input: Tensor) -> Tensor: ... +def cosh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cosh(input, *, out=None) -> Tensor + + Returns a new tensor with the hyperbolic cosine of the elements of + :attr:`input`. + + .. math:: + \text{out}_{i} = \cosh(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.1632, 1.1835, -0.6979, -0.7325]) + >>> torch.cosh(a) + tensor([ 1.0133, 1.7860, 1.2536, 1.2805]) + + .. 
note:: + When :attr:`input` is on the CPU, the implementation of torch.cosh may use + the Sleef library, which rounds very large results to infinity or negative + infinity. See `here `_ for details. + """ + ... +def cosh_(input: Tensor) -> Tensor: ... +def cosine_embedding_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ... +def cosine_similarity(x1: Tensor, x2: Tensor, dim: _int = 1, eps: _float = 1e-08) -> Tensor: ... +@overload +def count_nonzero(input: Tensor, dim: Optional[_int] = None) -> Tensor: + r""" + count_nonzero(input, dim=None) -> Tensor + + Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`. + If no dim is specified then all non-zeros in the tensor are counted. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros. + + Example:: + + >>> x = torch.zeros(3,3) + >>> x[torch.randn(3,3) > 0.5] = 1 + >>> x + tensor([[0., 1., 1.], + [0., 0., 0.], + [0., 0., 1.]]) + >>> torch.count_nonzero(x) + tensor(3) + >>> torch.count_nonzero(x, dim=0) + tensor([0, 1, 2]) + """ + ... +@overload +def count_nonzero(input: Tensor, dim: _size) -> Tensor: + r""" + count_nonzero(input, dim=None) -> Tensor + + Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`. + If no dim is specified then all non-zeros in the tensor are counted. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros. + + Example:: + + >>> x = torch.zeros(3,3) + >>> x[torch.randn(3,3) > 0.5] = 1 + >>> x + tensor([[0., 1., 1.], + [0., 0., 0.], + [0., 0., 1.]]) + >>> torch.count_nonzero(x) + tensor(3) + >>> torch.count_nonzero(x, dim=0) + tensor([0, 1, 2]) + """ + ... +def cov(input: Tensor, *, correction: _int = 1, fweights: Optional[Tensor] = None, aweights: Optional[Tensor] = None) -> Tensor: + r""" + cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor + + Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are + the variables and columns are the observations. + + A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains + the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents + a single variable (Scalar or 1D) then its variance is returned. + + The sample covariance of the variables :math:`x` and :math:`y` is given by: + + .. math:: + \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)} + + where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and + :math:`\delta N` is the :attr:`correction`. + + If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance + is calculated, which is given by: + + .. math:: + \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)} + {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)} + + where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is + provided, or :math:`w = f \times a` if both are provided, and + :math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. 
If not + provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size. + + Args: + input (Tensor): A 2D matrix containing multiple variables and observations, or a + Scalar or 1D vector representing a single variable. + + Keyword Args: + correction (int, optional): difference between the sample size and sample degrees of freedom. + Defaults to Bessel's correction, ``correction = 1`` which returns the unbiased estimate, + even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0`` + will return the simple average. Defaults to ``1``. + fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of + times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`. + Must have integral dtype. Ignored if ``None``. Defaults to ``None``. + aweights (tensor, optional): A Scalar or 1D array of observation vector weights. + These relative weights are typically large for observations considered “important” and smaller for + observations considered less “important”. Its numel must equal the number of columns of :attr:`input`. + Must have floating point dtype. Ignored if ``None``. Defaults to ``None``. + + Returns: + (Tensor) The covariance matrix of the variables. + + .. seealso:: + + :func:`torch.corrcoef` normalized covariance matrix. + + Example:: + >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T + >>> x + tensor([[0, 1, 2], + [2, 1, 0]]) + >>> torch.cov(x) + tensor([[ 1., -1.], + [-1., 1.]]) + >>> torch.cov(x, correction=0) + tensor([[ 0.6667, -0.6667], + [-0.6667, 0.6667]]) + >>> fw = torch.randint(1, 10, (3,)) + >>> fw + tensor([1, 6, 9]) + >>> aw = torch.rand(3) + >>> aw + tensor([0.4282, 0.0255, 0.4144]) + >>> torch.cov(x, fweights=fw, aweights=aw) + tensor([[ 0.4169, -0.4169], + [-0.4169, 0.4169]]) + """ + ... +def cross(input: Tensor, other: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + cross(input, other, dim=None, *, out=None) -> Tensor + + + Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input` + and :attr:`other`. + + Supports input of float, double, cfloat and cdouble dtypes. Also supports batches + of vectors, for which it computes the product along the dimension :attr:`dim`. + In this case, the output has the same batch dimensions as the inputs. + + .. warning:: + If :attr:`dim` is not given, it defaults to the first dimension found + with the size 3. Note that this might be unexpected. + + This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross` + in a future release. + + .. seealso:: + :func:`torch.linalg.cross` which has dim=-1 as default. + + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + dim (int, optional): the dimension to take the cross-product in. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4, 3) + >>> a + tensor([[-0.3956, 1.1455, 1.6895], + [-0.5849, 1.3672, 0.3599], + [-1.1626, 0.7180, -0.0521], + [-0.1339, 0.9902, -2.0225]]) + >>> b = torch.randn(4, 3) + >>> b + tensor([[-0.0257, -1.4725, -1.2251], + [-1.1479, -0.7005, -1.9757], + [-1.3904, 0.3726, -1.1836], + [-0.9688, -0.7153, 0.2159]]) + >>> torch.cross(a, b, dim=1) + tensor([[ 1.0844, -0.5281, 0.6120], + [-2.4490, -1.5687, 1.9792], + [-0.8304, -1.3037, 0.5650], + [-1.2329, 1.9883, 1.0551]]) + >>> torch.cross(a, b) + tensor([[ 1.0844, -0.5281, 0.6120], + [-2.4490, -1.5687, 1.9792], + [-0.8304, -1.3037, 0.5650], + [-1.2329, 1.9883, 1.0551]]) + """ + ... +def crow_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.crow_indices`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: _size, target_lengths: _size, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ... +@overload +def ctc_loss(log_probs: Tensor, targets: Tensor, input_lengths: Tensor, target_lengths: Tensor, blank: _int = 0, reduction: _int = 1, zero_infinity: _bool = False) -> Tensor: ... +def cudnn_affine_grid_generator(theta: Tensor, N: _int, C: _int, H: _int, W: _int) -> Tensor: ... +def cudnn_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def cudnn_convolution(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool, *, out: Optional[Tensor] = None) -> Tensor: ... +def cudnn_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def cudnn_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def cudnn_convolution_transpose(input: Tensor, weight: Tensor, padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool, allow_tf32: _bool) -> Tensor: ... +def cudnn_grid_sampler(input: Tensor, grid: Tensor) -> Tensor: ... +def cudnn_is_acceptable(input: Tensor) -> _bool: ... +@overload +def cummax(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax: + r""" + cummax(input, dim, *, out=None) -> (Tensor, LongTensor) + Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of + elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index + location of each maximum value found in the dimension :attr:`dim`. + + .. math:: + y_i = max(x_1, x_2, x_3, \dots, x_i) + + Args: + input (Tensor): the input tensor. 
+ dim (int): the dimension to do the operation over + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + + Example:: + + >>> a = torch.randn(10) + >>> a + tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284, + 1.9946, -0.8209]) + >>> torch.cummax(a, dim=0) + torch.return_types.cummax( + values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696, + 1.9946, 1.9946]), + indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8])) + """ + ... +@overload +def cummax(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummax: + r""" + cummax(input, dim, *, out=None) -> (Tensor, LongTensor) + Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of + elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index + location of each maximum value found in the dimension :attr:`dim`. + + .. math:: + y_i = max(x_1, x_2, x_3, \dots, x_i) + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + + Example:: + + >>> a = torch.randn(10) + >>> a + tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284, + 1.9946, -0.8209]) + >>> torch.cummax(a, dim=0) + torch.return_types.cummax( + values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696, + 1.9946, 1.9946]), + indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8])) + """ + ... +@overload +def cummin(input: Tensor, dim: _int, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin: + r""" + cummin(input, dim, *, out=None) -> (Tensor, LongTensor) + Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of + elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index + location of each maximum value found in the dimension :attr:`dim`. + + .. math:: + y_i = min(x_1, x_2, x_3, \dots, x_i) + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + + Example:: + + >>> a = torch.randn(10) + >>> a + tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762, + 0.9165, 1.6684]) + >>> torch.cummin(a, dim=0) + torch.return_types.cummin( + values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298, + -1.3298, -1.3298]), + indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4])) + """ + ... +@overload +def cummin(input: Tensor, dim: Union[str, ellipsis, None], *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.cummin: + r""" + cummin(input, dim, *, out=None) -> (Tensor, LongTensor) + Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of + elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index + location of each maximum value found in the dimension :attr:`dim`. + + .. math:: + y_i = min(x_1, x_2, x_3, \dots, x_i) + + Args: + input (Tensor): the input tensor. 
+ dim (int): the dimension to do the operation over + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + + Example:: + + >>> a = torch.randn(10) + >>> a + tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762, + 0.9165, 1.6684]) + >>> torch.cummin(a, dim=0) + torch.return_types.cummin( + values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298, + -1.3298, -1.3298]), + indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4])) + """ + ... +@overload +def cumprod(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + cumprod(input, dim, *, dtype=None, out=None) -> Tensor + + Returns the cumulative product of elements of :attr:`input` in the dimension + :attr:`dim`. + + For example, if :attr:`input` is a vector of size N, the result will also be + a vector of size N, with elements. + + .. math:: + y_i = x_1 \times x_2\times x_3\times \dots \times x_i + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(10) + >>> a + tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126, + -0.2129, -0.4206, 0.1968]) + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065, + 0.0014, -0.0006, -0.0001]) + + >>> a[5] = 0.0 + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000, + 0.0000, -0.0000, -0.0000]) + """ + ... +@overload +def cumprod(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + cumprod(input, dim, *, dtype=None, out=None) -> Tensor + + Returns the cumulative product of elements of :attr:`input` in the dimension + :attr:`dim`. + + For example, if :attr:`input` is a vector of size N, the result will also be + a vector of size N, with elements. + + .. math:: + y_i = x_1 \times x_2\times x_3\times \dots \times x_i + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(10) + >>> a + tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126, + -0.2129, -0.4206, 0.1968]) + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065, + 0.0014, -0.0006, -0.0001]) + + >>> a[5] = 0.0 + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000, + 0.0000, -0.0000, -0.0000]) + """ + ... +@overload +def cumsum(input: Tensor, dim: _int, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + cumsum(input, dim, *, dtype=None, out=None) -> Tensor + + Returns the cumulative sum of elements of :attr:`input` in the dimension + :attr:`dim`. 
+ + For example, if :attr:`input` is a vector of size N, the result will also be + a vector of size N, with elements. + + .. math:: + y_i = x_1 + x_2 + x_3 + \dots + x_i + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(1, 20, (10,)) + >>> a + tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10]) + >>> torch.cumsum(a, dim=0) + tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93]) + """ + ... +@overload +def cumsum(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + cumsum(input, dim, *, dtype=None, out=None) -> Tensor + + Returns the cumulative sum of elements of :attr:`input` in the dimension + :attr:`dim`. + + For example, if :attr:`input` is a vector of size N, the result will also be + a vector of size N, with elements. + + .. math:: + y_i = x_1 + x_2 + x_3 + \dots + x_i + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(1, 20, (10,)) + >>> a + tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10]) + >>> torch.cumsum(a, dim=0) + tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93]) + """ + ... +@overload +def cumulative_trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: + r""" + cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + + Cumulatively computes the `trapezoidal rule `_ + along :attr:`dim`. By default the spacing between elements is assumed to be 1, but + :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be + used to specify arbitrary spacing along :attr:`dim`. + + For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid` + and this function is that, :func:`torch.trapezoid` returns a value for each integration, + where as this function returns a cumulative value for every spacing within the integration. This + is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum. + + Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + + Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. + The last (inner-most) dimension by default. + + Examples:: + + >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1. 
+ >>> y = torch.tensor([1, 5, 10]) + >>> torch.cumulative_trapezoid(y) + tensor([3., 10.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> (1 + 5) / 2 + 3.0 + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.cumulative_trapezoid(y, dx=2) + tensor([6., 21.]) + + >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([6., 28.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> ((3 - 1) * (1 + 5)) / 2 + 6.0 + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.cumulative_trapezoid(y) + tensor([[ 0.5, 2.], + [ 3.5, 8.], + [ 6.5, 14.]]) + + >>> # Cumulatively computes the trapezoidal rule for each column of the matrix + >>> torch.cumulative_trapezoid(y, dim=0) + tensor([[ 1.5, 2.5, 3.5], + [ 6.0, 8.0, 10.0]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[2., 5.], + [2., 5.], + [2., 5.]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[1., 2.], + [2., 4.], + [3., 6.]]) + """ + ... +@overload +def cumulative_trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor: + r""" + cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + + Cumulatively computes the `trapezoidal rule `_ + along :attr:`dim`. By default the spacing between elements is assumed to be 1, but + :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be + used to specify arbitrary spacing along :attr:`dim`. + + For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid` + and this function is that, :func:`torch.trapezoid` returns a value for each integration, + where as this function returns a cumulative value for every spacing within the integration. This + is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum. + + Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + + Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. + The last (inner-most) dimension by default. + + Examples:: + + >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1. 
+ >>> y = torch.tensor([1, 5, 10]) + >>> torch.cumulative_trapezoid(y) + tensor([3., 10.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> (1 + 5) / 2 + 3.0 + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.cumulative_trapezoid(y, dx=2) + tensor([6., 21.]) + + >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([6., 28.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> ((3 - 1) * (1 + 5)) / 2 + 6.0 + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.cumulative_trapezoid(y) + tensor([[ 0.5, 2.], + [ 3.5, 8.], + [ 6.5, 14.]]) + + >>> # Cumulatively computes the trapezoidal rule for each column of the matrix + >>> torch.cumulative_trapezoid(y, dim=0) + tensor([[ 1.5, 2.5, 3.5], + [ 6.0, 8.0, 10.0]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[2., 5.], + [2., 5.], + [2., 5.]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[1., 2.], + [2., 4.], + [3., 6.]]) + """ + ... +def deg2rad(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + deg2rad(input, *, out=None) -> Tensor + + Returns a new tensor with each of the elements of :attr:`input` + converted from angles in degrees to radians. + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]]) + >>> torch.deg2rad(a) + tensor([[ 3.1416, -3.1416], + [ 6.2832, -6.2832], + [ 1.5708, -1.5708]]) + """ + ... +def deg2rad_(input: Tensor) -> Tensor: ... +@overload +def dequantize(input: Tensor) -> Tensor: + r""" + dequantize(tensor) -> Tensor + + Returns an fp32 Tensor by dequantizing a quantized Tensor + + Args: + tensor (Tensor): A quantized Tensor + + .. function:: dequantize(tensors) -> sequence of Tensors + :noindex: + + Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors + + Args: + tensors (sequence of Tensors): A list of quantized Tensors + """ + ... +@overload +def dequantize(tensors: Union[Tuple[Tensor, ...], List[Tensor]]) -> Tuple[Tensor, ...]: + r""" + dequantize(tensor) -> Tensor + + Returns an fp32 Tensor by dequantizing a quantized Tensor + + Args: + tensor (Tensor): A quantized Tensor + + .. function:: dequantize(tensors) -> sequence of Tensors + :noindex: + + Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors + + Args: + tensors (sequence of Tensors): A list of quantized Tensors + """ + ... +def det(input: Tensor) -> Tensor: + r""" + det(input) -> Tensor + + Alias for :func:`torch.linalg.det` + """ + ... +def detach(input: Tensor) -> Tensor: ... +def detach_(input: Tensor) -> Tensor: ... 
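+# A minimal sanity check for the ``cumulative_trapezoid`` and ``deg2rad`` entries
+# above (an illustrative sketch, not part of the generated stub; it assumes only a
+# stock ``torch`` install and is runnable as a standalone script)::
+#
+#     import math
+#     import torch
+#
+#     y = torch.tensor([1.0, 5.0, 10.0])
+#     x = torch.tensor([1.0, 3.0, 6.0])
+#     # trapezoid areas by hand: (3-1)*(1+5)/2 = 6.0, then 6.0 + (6-3)*(5+10)/2 = 28.5
+#     assert torch.allclose(torch.cumulative_trapezoid(y, x), torch.tensor([6.0, 28.5]))
+#     assert torch.allclose(torch.deg2rad(torch.tensor([180.0])), torch.tensor([math.pi]))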
+def detach_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.detach`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def diag(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + diag(input, diagonal=0, *, out=None) -> Tensor + + - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor + with the elements of :attr:`input` as the diagonal. + - If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with + the diagonal elements of :attr:`input`. + + The argument :attr:`diagonal` controls which diagonal to consider: + + - If :attr:`diagonal` = 0, it is the main diagonal. + - If :attr:`diagonal` > 0, it is above the main diagonal. + - If :attr:`diagonal` < 0, it is below the main diagonal. + + Args: + input (Tensor): the input tensor. + diagonal (int, optional): the diagonal to consider + + Keyword args: + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.diagonal` always returns the diagonal of its input. + + :func:`torch.diagflat` always constructs a tensor with diagonal elements + specified by the input. + + Examples: + + Get the square matrix where the input vector is the diagonal:: + + >>> a = torch.randn(3) + >>> a + tensor([ 0.5950,-0.0872, 2.3298]) + >>> torch.diag(a) + tensor([[ 0.5950, 0.0000, 0.0000], + [ 0.0000,-0.0872, 0.0000], + [ 0.0000, 0.0000, 2.3298]]) + >>> torch.diag(a, 1) + tensor([[ 0.0000, 0.5950, 0.0000, 0.0000], + [ 0.0000, 0.0000,-0.0872, 0.0000], + [ 0.0000, 0.0000, 0.0000, 2.3298], + [ 0.0000, 0.0000, 0.0000, 0.0000]]) + + Get the k-th diagonal of a given matrix:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-0.4264, 0.0255,-0.1064], + [ 0.8795,-0.2429, 0.1374], + [ 0.1029,-0.6482,-1.6300]]) + >>> torch.diag(a, 0) + tensor([-0.4264,-0.2429,-1.6300]) + >>> torch.diag(a, 1) + tensor([ 0.0255, 0.1374]) + """ + ... +def diag_embed(input: Tensor, offset: _int = 0, dim1: _int = -2, dim2: _int = -1) -> Tensor: + r""" + diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor + + Creates a tensor whose diagonals of certain 2D planes (specified by + :attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`. + To facilitate creating batched diagonal matrices, the 2D planes formed by + the last two dimensions of the returned tensor are chosen by default. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. + + The size of the new matrix will be calculated to make the specified diagonal + of the size of the last input dimension. + Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1` + and :attr:`dim2` matters. Exchanging them is equivalent to changing the + sign of :attr:`offset`. + + Applying :meth:`torch.diagonal` to the output of this function with + the same arguments yields a matrix identical to input. However, + :meth:`torch.diagonal` has different default dimensions, so those + need to be explicitly specified. + + Args: + input (Tensor): the input tensor. Must be at least 1-dimensional. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: -2. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: -1. 
+ + Example:: + + >>> a = torch.randn(2, 3) + >>> torch.diag_embed(a) + tensor([[[ 1.5410, 0.0000, 0.0000], + [ 0.0000, -0.2934, 0.0000], + [ 0.0000, 0.0000, -2.1788]], + + [[ 0.5684, 0.0000, 0.0000], + [ 0.0000, -1.0845, 0.0000], + [ 0.0000, 0.0000, -1.3986]]]) + + >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2) + tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000], + [ 0.0000, 0.5684, 0.0000, 0.0000]], + + [[ 0.0000, 0.0000, -0.2934, 0.0000], + [ 0.0000, 0.0000, -1.0845, 0.0000]], + + [[ 0.0000, 0.0000, 0.0000, -2.1788], + [ 0.0000, 0.0000, 0.0000, -1.3986]], + + [[ 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000]]]) + """ + ... +def diagflat(input: Tensor, offset: _int = 0) -> Tensor: + r""" + diagflat(input, offset=0) -> Tensor + + - If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor + with the elements of :attr:`input` as the diagonal. + - If :attr:`input` is a tensor with more than one dimension, then returns a + 2-D tensor with diagonal elements equal to a flattened :attr:`input`. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. + + Args: + input (Tensor): the input tensor. + offset (int, optional): the diagonal to consider. Default: 0 (main + diagonal). + + Examples:: + + >>> a = torch.randn(3) + >>> a + tensor([-0.2956, -0.9068, 0.1695]) + >>> torch.diagflat(a) + tensor([[-0.2956, 0.0000, 0.0000], + [ 0.0000, -0.9068, 0.0000], + [ 0.0000, 0.0000, 0.1695]]) + >>> torch.diagflat(a, 1) + tensor([[ 0.0000, -0.2956, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.9068, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.1695], + [ 0.0000, 0.0000, 0.0000, 0.0000]]) + + >>> a = torch.randn(2, 2) + >>> a + tensor([[ 0.2094, -0.3018], + [-0.1516, 1.9342]]) + >>> torch.diagflat(a) + tensor([[ 0.2094, 0.0000, 0.0000, 0.0000], + [ 0.0000, -0.3018, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.1516, 0.0000], + [ 0.0000, 0.0000, 0.0000, 1.9342]]) + """ + ... +@overload +def diagonal(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: + r""" + diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor + + Returns a partial view of :attr:`input` with the its diagonal elements + with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension + at the end of the shape. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. + + Applying :meth:`torch.diag_embed` to the output of this function with + the same arguments yields a diagonal matrix with the diagonal entries + of the input. However, :meth:`torch.diag_embed` has different default + dimensions, so those need to be explicitly specified. + + Args: + input (Tensor): the input tensor. Must be at least 2-dimensional. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + + .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1. 
+ + Examples:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0854, 1.1431, -0.1752], + [ 0.8536, -0.0905, 0.0360], + [ 0.6927, -0.3735, -0.4945]]) + + + >>> torch.diagonal(a, 0) + tensor([-1.0854, -0.0905, -0.4945]) + + + >>> torch.diagonal(a, 1) + tensor([ 1.1431, 0.0360]) + + + >>> x = torch.randn(2, 5, 4, 2) + >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2) + tensor([[[-1.2631, 0.3755, -1.5977, -1.8172], + [-1.1065, 1.0401, -0.2235, -0.7938]], + + [[-1.7325, -0.3081, 0.6166, 0.2335], + [ 1.0500, 0.7336, -0.3836, -1.1015]]]) + """ + ... +@overload +def diagonal(input: Tensor, *, outdim: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None], dim2: Union[str, ellipsis, None], offset: _int = 0) -> Tensor: + r""" + diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor + + Returns a partial view of :attr:`input` with the its diagonal elements + with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension + at the end of the shape. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. + + Applying :meth:`torch.diag_embed` to the output of this function with + the same arguments yields a diagonal matrix with the diagonal entries + of the input. However, :meth:`torch.diag_embed` has different default + dimensions, so those need to be explicitly specified. + + Args: + input (Tensor): the input tensor. Must be at least 2-dimensional. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + + .. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1. + + Examples:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0854, 1.1431, -0.1752], + [ 0.8536, -0.0905, 0.0360], + [ 0.6927, -0.3735, -0.4945]]) + + + >>> torch.diagonal(a, 0) + tensor([-1.0854, -0.0905, -0.4945]) + + + >>> torch.diagonal(a, 1) + tensor([ 1.1431, 0.0360]) + + + >>> x = torch.randn(2, 5, 4, 2) + >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2) + tensor([[[-1.2631, 0.3755, -1.5977, -1.8172], + [-1.1065, 1.0401, -0.2235, -0.7938]], + + [[-1.7325, -0.3081, 0.6166, 0.2335], + [ 1.0500, 0.7336, -0.3836, -1.1015]]]) + """ + ... +def diagonal_copy(input: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.diagonal`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def diagonal_scatter(input: Tensor, src: Tensor, offset: _int = 0, dim1: _int = 0, dim2: _int = 1) -> Tensor: + r""" + diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor + + Embeds the values of the :attr:`src` tensor into :attr:`input` along + the diagonal elements of :attr:`input`, with respect to :attr:`dim1` + and :attr:`dim2`. + + This function returns a tensor with fresh storage; it does not + return a view. + + The argument :attr:`offset` controls which diagonal to consider: + + - If :attr:`offset` = 0, it is the main diagonal. + - If :attr:`offset` > 0, it is above the main diagonal. + - If :attr:`offset` < 0, it is below the main diagonal. + + Args: + input (Tensor): the input tensor. Must be at least 2-dimensional. + src (Tensor): the tensor to embed into :attr:`input`. 
+ offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + + .. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + ``torch.diagonal(input, offset, dim1, dim2)`` + + Examples:: + + >>> a = torch.zeros(3, 3) + >>> a + tensor([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + + >>> torch.diagonal_scatter(a, torch.ones(3), 0) + tensor([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + >>> torch.diagonal_scatter(a, torch.ones(2), 1) + tensor([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) + """ + ... +def diff(input: Tensor, n: _int = 1, dim: _int = -1, prepend: Optional[Tensor] = None, append: Optional[Tensor] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor + + Computes the n-th forward difference along the given dimension. + + The first-order differences are given by `out[i] = input[i + 1] - input[i]`. Higher-order + differences are calculated by using :func:`torch.diff` recursively. + + Args: + input (Tensor): the tensor to compute the differences on + n (int, optional): the number of times to recursively compute the difference + dim (int, optional): the dimension to compute the difference along. + Default is the last dimension. + prepend, append (Tensor, optional): values to prepend or append to + :attr:`input` along :attr:`dim` before computing the difference. + Their dimensions must be equivalent to that of input, and their shapes + must match input's shape except on :attr:`dim`. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([1, 3, 2]) + >>> torch.diff(a) + tensor([ 2, -1]) + >>> b = torch.tensor([4, 5]) + >>> torch.diff(a, append=b) + tensor([ 2, -1, 2, 1]) + >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]]) + >>> torch.diff(c, dim=0) + tensor([[2, 2, 2]]) + >>> torch.diff(c, dim=1) + tensor([[1, 1], + [1, 1]]) + """ + ... +def digamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + digamma(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.digamma`. + """ + ... +def dist(input: Tensor, other: Tensor, p: Union[Number, _complex] = 2) -> Tensor: + r""" + dist(input, other, p=2) -> Tensor + + Returns the p-norm of (:attr:`input` - :attr:`other`) + + The shapes of :attr:`input` and :attr:`other` must be + :ref:`broadcastable `. + + Args: + input (Tensor): the input tensor. + other (Tensor): the Right-hand-side input tensor + p (float, optional): the norm to be computed + + Example:: + + >>> x = torch.randn(4) + >>> x + tensor([-1.5393, -0.8675, 0.5916, 1.6321]) + >>> y = torch.randn(4) + >>> y + tensor([ 0.0967, -1.0511, 0.6295, 0.8360]) + >>> torch.dist(x, y, 3.5) + tensor(1.6727) + >>> torch.dist(x, y, 3) + tensor(1.6973) + >>> torch.dist(x, y, 0) + tensor(4.) + >>> torch.dist(x, y, 1) + tensor(2.6537) + """ + ... +def div(input: Union[Tensor, Number], other: Union[Tensor, Number], *, rounding_mode: Optional[str] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + div(input, other, *, rounding_mode=None, out=None) -> Tensor + + Divides each element of the input ``input`` by the corresponding element of + :attr:`other`. + + .. math:: + \text{out}_i = \frac{\text{input}_i}{\text{other}_i} + + .. 
note:: + By default, this performs a "true" division like Python 3. + See the :attr:`rounding_mode` argument for floor division. + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + Always promotes integer types to the default scalar type. + + Args: + input (Tensor): the dividend + other (Tensor or Number): the divisor + + Keyword args: + rounding_mode (str, optional): Type of rounding applied to the result: + + * None - default behavior. Performs no rounding and, if both :attr:`input` and + :attr:`other` are integer types, promotes the inputs to the default scalar type. + Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``. + * ``"trunc"`` - rounds the results of the division towards zero. + Equivalent to C-style integer division. + * ``"floor"`` - rounds the results of the division down. + Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``. + + out (Tensor, optional): the output tensor. + + Examples:: + + >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637]) + >>> torch.div(x, 0.5) + tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274]) + + >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917], + ... [ 0.1815, -1.0111, 0.9805, -1.5923], + ... [ 0.1062, 1.4581, 0.7759, -1.2344], + ... [-0.1830, -0.0313, 1.1908, -1.4757]]) + >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308]) + >>> torch.div(a, b) + tensor([[-0.4620, -6.6051, 0.5676, 1.2639], + [ 0.2260, -3.4509, -1.2086, 6.8990], + [ 0.1322, 4.9764, -0.9564, 5.3484], + [-0.2278, -0.1068, -1.4678, 6.3938]]) + + >>> torch.div(a, b, rounding_mode='trunc') + tensor([[-0., -6., 0., 1.], + [ 0., -3., -1., 6.], + [ 0., 4., -0., 5.], + [-0., -0., -1., 6.]]) + + >>> torch.div(a, b, rounding_mode='floor') + tensor([[-1., -7., 0., 1.], + [ 0., -4., -2., 6.], + [ 0., 4., -1., 5.], + [-1., -1., -2., 6.]]) + """ + ... +@overload +def divide(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + divide(input, other, *, rounding_mode=None, out=None) -> Tensor + + Alias for :func:`torch.div`. + """ + ... +@overload +def divide(input: Tensor, other: Tensor, *, rounding_mode: Optional[str], out: Optional[Tensor] = None) -> Tensor: + r""" + divide(input, other, *, rounding_mode=None, out=None) -> Tensor + + Alias for :func:`torch.div`. + """ + ... +@overload +def divide(input: Tensor, other: Union[Number, _complex], *, rounding_mode: Optional[str]) -> Tensor: + r""" + divide(input, other, *, rounding_mode=None, out=None) -> Tensor + + Alias for :func:`torch.div`. + """ + ... +@overload +def divide(input: Tensor, other: Union[Number, _complex]) -> Tensor: + r""" + divide(input, other, *, rounding_mode=None, out=None) -> Tensor + + Alias for :func:`torch.div`. + """ + ... +def dot(input: Tensor, tensor: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + dot(input, other, *, out=None) -> Tensor + + Computes the dot product of two 1D tensors. + + .. note:: + + Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product + of two 1D tensors with the same number of elements. + + Args: + input (Tensor): first tensor in the dot product, must be 1D. + other (Tensor): second tensor in the dot product, must be 1D. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1])) + tensor(7) + """ + ... +def dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... 
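+# A short sketch of the ``rounding_mode`` behaviour documented for ``div`` above
+# (illustrative only; assumes a stock ``torch`` install): ``"trunc"`` rounds the
+# quotient toward zero, ``"floor"`` matches Python's ``//`` operator, and the
+# default (``rounding_mode=None``) is true division::
+#
+#     import torch
+#
+#     num = torch.tensor([7.0, -7.0])
+#     den = torch.tensor([2.0, 2.0])
+#     assert torch.equal(torch.div(num, den), torch.tensor([3.5, -3.5]))
+#     assert torch.equal(torch.div(num, den, rounding_mode="trunc"), torch.tensor([3.0, -3.0]))
+#     assert torch.equal(torch.div(num, den, rounding_mode="floor"), torch.tensor([3.0, -4.0]))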
+def dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def dsmm(input: Tensor, mat2: Tensor) -> Tensor: ... +@overload +def dsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]: + r""" + dsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors + depthwise according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2) + (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer + it must evenly divide the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.dsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + + Example:: + >>> t = torch.arange(16.0).reshape(2, 2, 4) + >>> t + tensor([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> torch.dsplit(t, 2) + (tensor([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), + tensor([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])) + + >>> torch.dsplit(t, [3, 6]) + (tensor([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + tensor([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + tensor([], size=(2, 2, 0))) + """ + ... +@overload +def dsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]: + r""" + dsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors + depthwise according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2) + (the split dimension is 2), except that if :attr:`indices_or_sections` is an integer + it must evenly divide the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.dsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + + Example:: + >>> t = torch.arange(16.0).reshape(2, 2, 4) + >>> t + tensor([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> torch.dsplit(t, 2) + (tensor([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), + tensor([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])) + + >>> torch.dsplit(t, [3, 6]) + (tensor([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + tensor([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + tensor([], size=(2, 2, 0))) + """ + ... +def dstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + dstack(tensors, *, out=None) -> Tensor + + Stack tensors in sequence depthwise (along third axis). + + This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`. + + Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.dstack((a,b)) + tensor([[[1, 4], + [2, 5], + [3, 6]]]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.dstack((a,b)) + tensor([[[1, 4]], + [[2, 5]], + [[3, 6]]]) + """ + ... +def embedding(weight: Tensor, indices: Tensor, padding_idx: Union[_int, SymInt] = -1, scale_grad_by_freq: _bool = False, sparse: _bool = False) -> Tensor: ... +@overload +def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool, mode: _int, sparse: _bool, per_sample_weights: Optional[Tensor], include_last_offset: _bool, padding_idx: Optional[_int]) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +@overload +def embedding_bag(weight: Tensor, indices: Tensor, offsets: Tensor, scale_grad_by_freq: _bool = False, mode: _int = 0, sparse: _bool = False, per_sample_weights: Optional[Tensor] = None, include_last_offset: _bool = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ... +def embedding_renorm_(input: Tensor, indices: Tensor, max_norm: _float, norm_type: _float) -> Tensor: ... +@overload +def empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor + + Returns a tensor filled with uninitialized data. The shape of the tensor is + defined by the variable argument :attr:`size`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. 
+ + Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) + """ + ... +@overload +def empty(*size: _int, memory_format: Optional[memory_format] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor + + Returns a tensor filled with uninitialized data. The shape of the tensor is + defined by the variable argument :attr:`size`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + + Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) + """ + ... +@overload +def empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor + + Returns a tensor filled with uninitialized data. The shape of the tensor is + defined by the variable argument :attr:`size`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. 
+ Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + + Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) + """ + ... +@overload +def empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, memory_format=torch.contiguous_format) -> Tensor + + Returns a tensor filled with uninitialized data. The shape of the tensor is + defined by the variable argument :attr:`size`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. + + Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) + """ + ... +def empty_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns an uninitialized tensor with the same size as :attr:`input`. + ``torch.empty_like(input)`` is equivalent to + ``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + Example:: + + >>> a=torch.empty((2,3), dtype=torch.int32, device = 'cuda') + >>> torch.empty_like(a) + tensor([[0, 0, 0], + [0, 0, 0]], device='cuda:0', dtype=torch.int32) + """ + ... +def empty_permuted(size: Sequence[Union[_int, SymInt]], physical_layout: _size, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Creates an uninitialized, non-overlapping and dense tensor with the + specified :attr:`size`, with :attr:`physical_layout` specifying how the + dimensions are physically laid out in memory (each logical dimension is listed + from outermost to innermost). :attr:`physical_layout` is a generalization + of NCHW/NHWC notation: if each dimension is assigned a number according to + what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)`` + while NHWC is ``(0, 2, 3, 1)``. 
Equivalently, the strides of the output + tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]`` + (notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``). + + Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense + tensor with no overlaps. If possible, prefer using this function over + :func:`torch.empty_strided` or manual use of :func:`torch.as_strided`. + + .. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (tuple of int): the shape of the output tensor + physical_layout (tuple of int): the ordering of dimensions physically in memory + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Examples: + + >>> torch.empty((2, 3, 5, 7)).stride() + (105, 35, 7, 1) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride() + (105, 35, 7, 1) + >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride() + (105, 1, 21, 3) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride() + (105, 1, 21, 3) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order() + (0, 2, 3, 1) + """ + ... +def empty_quantized(size: _size, qtensor: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +def empty_strided(size: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data. + + .. warning:: + If the constructed tensor is "overlapped" (with multiple indices referring to the same element + in memory) its behavior is undefined. + + .. 
note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + + Args: + size (tuple of int): the shape of the output tensor + stride (tuple of int): the strides of the output tensor + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> a = torch.empty_strided((2, 3), (1, 2)) + >>> a + tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07], + [0.0000e+00, 0.0000e+00, 3.0705e-41]]) + >>> a.stride() + (1, 2) + >>> a.size() + torch.Size([2, 3]) + """ + ... +@overload +def eq(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + eq(input, other, *, out=None) -> Tensor + + Computes element-wise equality + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[ True, False], + [False, True]]) + """ + ... +@overload +def eq(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + eq(input, other, *, out=None) -> Tensor + + Computes element-wise equality + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[ True, False], + [False, True]]) + """ + ... +def equal(input: Tensor, other: Tensor) -> _bool: + r""" + equal(input, other) -> bool + + ``True`` if two tensors have the same size and elements, ``False`` otherwise. + + Example:: + + >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2])) + True + """ + ... +def erf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + erf(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.erf`. + """ + ... 
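+# The distinction between ``eq`` and ``equal`` above in one illustrative sketch
+# (assumes a stock ``torch`` install): ``eq`` compares elementwise with
+# broadcasting and returns a boolean tensor, while ``equal`` compares whole
+# tensors and returns a single Python bool::
+#
+#     import torch
+#
+#     a = torch.tensor([[1, 2], [3, 4]])
+#     b = torch.tensor([1, 4])  # broadcast against each row of ``a``
+#     assert torch.eq(a, b).tolist() == [[True, False], [False, True]]
+#     assert torch.equal(a, a.clone()) and not torch.equal(a, b)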
+def erf_(input: Tensor) -> Tensor: ... +def erfc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + erfc(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.erfc`. + """ + ... +def erfc_(input: Tensor) -> Tensor: ... +def erfinv(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + erfinv(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.erfinv`. + """ + ... +def exp(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + exp(input, *, out=None) -> Tensor + + Returns a new tensor with the exponential of the elements + of the input tensor :attr:`input`. + + .. math:: + y_{i} = e^{x_{i}} + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.exp(torch.tensor([0, math.log(2.)])) + tensor([ 1., 2.]) + """ + ... +def exp2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + exp2(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.exp2`. + """ + ... +def exp2_(input: Tensor) -> Tensor: ... +def exp_(input: Tensor) -> Tensor: ... +def expand_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, implicit: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.expand`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def expm1(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + expm1(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.expm1`. + """ + ... +def expm1_(input: Tensor) -> Tensor: ... +@overload +def eye(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. + + Args: + n (int): the number of rows + m (int, optional): the number of columns with default being :attr:`n` + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere + + Example:: + + >>> torch.eye(3) + tensor([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + """ + ... 
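+# A quick check of the ``exp`` family above (illustrative sketch; assumes a stock
+# ``torch`` install): ``exp2`` is the base-2 variant and ``expm1`` computes
+# ``exp(x) - 1`` without losing precision for small ``x``::
+#
+#     import math
+#     import torch
+#
+#     assert torch.allclose(torch.exp(torch.tensor([0.0, math.log(2.0)])), torch.tensor([1.0, 2.0]))
+#     assert torch.allclose(torch.exp2(torch.tensor([0.0, 1.0, 3.0])), torch.tensor([1.0, 2.0, 8.0]))
+#     assert torch.expm1(torch.tensor([0.0])).item() == 0.0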
+@overload +def eye(n: Union[_int, SymInt], m: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. + + Args: + n (int): the number of rows + m (int, optional): the number of columns with default being :attr:`n` + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere + + Example:: + + >>> torch.eye(3) + tensor([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + """ + ... +def fake_quantize_per_channel_affine(input: Tensor, scale: Tensor, zero_point: Tensor, axis: _int, quant_min: _int, quant_max: _int) -> Tensor: + r""" + fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor + + Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`, + :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`. + + .. math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + + Args: + input (Tensor): the input value(s), in ``torch.float32`` + scale (Tensor): quantization scale, per channel in ``torch.float32`` + zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32`` + axis (int32): channel axis + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + + Returns: + Tensor: A newly fake_quantized per channel ``torch.float32`` tensor + + Example:: + + >>> x = torch.randn(2, 2, 2) + >>> x + tensor([[[-0.2525, -0.0466], + [ 0.3491, -0.2168]], + + [[-0.5906, 1.6258], + [ 0.6444, -0.0542]]]) + >>> scales = (torch.randn(2) + 1) * 0.05 + >>> scales + tensor([0.0475, 0.0486]) + >>> zero_points = torch.zeros(2).to(torch.int32) + >>> zero_points + tensor([0, 0]) + >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255) + tensor([[[0.0000, 0.0000], + [0.3405, 0.0000]], + + [[0.0000, 1.6134], + [0.6323, 0.0000]]]) + """ + ... 
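+# The two ``eye`` overloads defined above in a small illustrative sketch (assumes
+# a stock ``torch`` install): one argument gives a square identity matrix, and a
+# second argument gives a rectangular matrix with ones only on the main diagonal::
+#
+#     import torch
+#
+#     assert torch.equal(torch.eye(2), torch.tensor([[1.0, 0.0], [0.0, 1.0]]))
+#     assert torch.eye(2, 4).shape == torch.Size([2, 4])
+#     assert torch.eye(2, 4, dtype=torch.int64).sum().item() == 2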
+@overload +def fake_quantize_per_tensor_affine(input: Tensor, scale: _float, zero_point: _int, quant_min: _int, quant_max: _int) -> Tensor: + r""" + fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor + + Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`, + :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`. + + .. math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + + Args: + input (Tensor): the input value(s), ``torch.float32`` tensor + scale (double scalar or ``float32`` Tensor): quantization scale + zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + + Returns: + Tensor: A newly fake_quantized ``torch.float32`` tensor + + Example:: + + >>> x = torch.randn(4) + >>> x + tensor([ 0.0552, 0.9730, 0.3973, -1.0780]) + >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + """ + ... +@overload +def fake_quantize_per_tensor_affine(input: Tensor, scale: Tensor, zero_point: Tensor, quant_min: _int, quant_max: _int) -> Tensor: + r""" + fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor + + Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`, + :attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`. + + .. math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + + Args: + input (Tensor): the input value(s), ``torch.float32`` tensor + scale (double scalar or ``float32`` Tensor): quantization scale + zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + + Returns: + Tensor: A newly fake_quantized ``torch.float32`` tensor + + Example:: + + >>> x = torch.randn(4) + >>> x + tensor([ 0.0552, 0.9730, 0.3973, -1.0780]) + >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + """ + ... +def fbgemm_linear_fp16_weight(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ... +def fbgemm_linear_fp16_weight_fp32_activation(input: Tensor, packed_weight: Tensor, bias: Tensor) -> Tensor: ... +def fbgemm_linear_int8_weight(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ... +def fbgemm_linear_int8_weight_fp32_activation(input: Tensor, weight: Tensor, packed: Tensor, col_offsets: Tensor, weight_scale: Union[Number, _complex], weight_zero_point: Union[Number, _complex], bias: Tensor) -> Tensor: ... +def fbgemm_linear_quantize_weight(input: Tensor) -> Tuple[Tensor, Tensor, _float, _int]: ... +def fbgemm_pack_gemm_matrix_fp16(input: Tensor) -> Tensor: ... 
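+# Sketch checking the fake-quantization formula above by hand: round input/scale,
+# add the zero point, clamp to [quant_min, quant_max], then undo the affine map.
+# ``torch.round`` rounds half to even, matching std::nearbyint in its default mode.
+import torch
+x = torch.randn(4)
+scale, zero_point, qmin, qmax = 0.1, 0, 0, 255
+manual = (torch.clamp(torch.round(x / scale) + zero_point, qmin, qmax) - zero_point) * scale
+fused = torch.fake_quantize_per_tensor_affine(x, scale, zero_point, qmin, qmax)
+# torch.allclose(manual, fused) is expected to be True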
+@overload +def fbgemm_pack_quantized_matrix(input: Tensor) -> Tensor: ... +@overload +def fbgemm_pack_quantized_matrix(input: Tensor, K: _int, N: _int) -> Tensor: ... +def feature_alpha_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def feature_alpha_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def feature_dropout(input: Tensor, p: _float, train: _bool) -> Tensor: ... +def feature_dropout_(input: Tensor, p: _float, train: _bool) -> Tensor: ... +@overload +def fill(input: Tensor, value: Tensor) -> Tensor: ... +@overload +def fill(input: Tensor, value: Union[Number, _complex]) -> Tensor: ... +@overload +def fill_(input: Tensor, value: Tensor) -> Tensor: ... +@overload +def fill_(input: Tensor, value: Union[Number, _complex]) -> Tensor: ... +def fix(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + fix(input, *, out=None) -> Tensor + + Alias for :func:`torch.trunc` + """ + ... +def fix_(input: Tensor) -> Tensor: ... +@overload +def flatten(input: Tensor, start_dim: _int = 0, end_dim: _int = -1) -> Tensor: + r""" + flatten(input, start_dim=0, end_dim=-1) -> Tensor + + Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` + are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. + The order of elements in :attr:`input` is unchanged. + + Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, + or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can + be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the + flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + + .. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + + Args: + input (Tensor): the input tensor. + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) + """ + ... +@overload +def flatten(input: Tensor, start_dim: _int, end_dim: _int, out_dim: Union[str, ellipsis, None]) -> Tensor: + r""" + flatten(input, start_dim=0, end_dim=-1) -> Tensor + + Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` + are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. + The order of elements in :attr:`input` is unchanged. + + Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, + or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can + be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the + flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + + .. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + + Args: + input (Tensor): the input tensor. + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... 
[[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) + """ + ... +@overload +def flatten(input: Tensor, start_dim: Union[str, ellipsis, None], end_dim: Union[str, ellipsis, None], out_dim: Union[str, ellipsis, None]) -> Tensor: + r""" + flatten(input, start_dim=0, end_dim=-1) -> Tensor + + Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` + are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. + The order of elements in :attr:`input` is unchanged. + + Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, + or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can + be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the + flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + + .. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + + Args: + input (Tensor): the input tensor. + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) + """ + ... +@overload +def flatten(input: Tensor, dims: Sequence[Union[str, ellipsis, None]], out_dim: Union[str, ellipsis, None]) -> Tensor: + r""" + flatten(input, start_dim=0, end_dim=-1) -> Tensor + + Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` + are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. + The order of elements in :attr:`input` is unchanged. + + Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, + or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can + be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the + flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + + .. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + + Args: + input (Tensor): the input tensor. + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) + """ + ... +def flip(input: Tensor, dims: _size) -> Tensor: + r""" + flip(input, dims) -> Tensor + + Reverse the order of an n-D tensor along given axis in dims. + + .. note:: + `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.flip` is expected to be slower than `np.flip`. + + Args: + input (Tensor): the input tensor. 
+ dims (a list or tuple): axis to flip on + + Example:: + + >>> x = torch.arange(8).view(2, 2, 2) + >>> x + tensor([[[ 0, 1], + [ 2, 3]], + + [[ 4, 5], + [ 6, 7]]]) + >>> torch.flip(x, [0, 1]) + tensor([[[ 6, 7], + [ 4, 5]], + + [[ 2, 3], + [ 0, 1]]]) + """ + ... +def fliplr(input: Tensor) -> Tensor: + r""" + fliplr(input) -> Tensor + + Flip tensor in the left/right direction, returning a new tensor. + + Flip the entries in each row in the left/right direction. + Columns are preserved, but appear in a different order than before. + + Note: + Requires the tensor to be at least 2-D. + + .. note:: + `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.fliplr` is expected to be slower than `np.fliplr`. + + Args: + input (Tensor): Must be at least 2-dimensional. + + Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.fliplr(x) + tensor([[1, 0], + [3, 2]]) + """ + ... +def flipud(input: Tensor) -> Tensor: + r""" + flipud(input) -> Tensor + + Flip tensor in the up/down direction, returning a new tensor. + + Flip the entries in each column in the up/down direction. + Rows are preserved, but appear in a different order than before. + + Note: + Requires the tensor to be at least 1-D. + + .. note:: + `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.flipud` is expected to be slower than `np.flipud`. + + Args: + input (Tensor): Must be at least 1-dimensional. + + Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.flipud(x) + tensor([[2, 3], + [0, 1]]) + """ + ... +@overload +def float_power(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + float_power(input, exponent, *, out=None) -> Tensor + + Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision. + If neither input is complex returns a ``torch.float64`` tensor, + and if one or more inputs is complex returns a ``torch.complex128`` tensor. + + .. note:: + This function always computes in double precision, unlike :func:`torch.pow`, + which implements more typical :ref:`type promotion `. + This is useful when the computation needs to be performed in a wider or more precise dtype, + or the results of the computation may contain fractional values not representable in the input dtypes, + like when an integer base is raised to a negative integer exponent. + + Args: + input (Tensor or Number): the base value(s) + exponent (Tensor or Number): the exponent value(s) + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(10, (4,)) + >>> a + tensor([6, 4, 7, 1]) + >>> torch.float_power(a, 2) + tensor([36., 16., 49., 1.], dtype=torch.float64) + + >>> a = torch.arange(1, 5) + >>> a + tensor([ 1, 2, 3, 4]) + >>> exp = torch.tensor([2, -3, 4, -5]) + >>> exp + tensor([ 2, -3, 4, -5]) + >>> torch.float_power(a, exp) + tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64) + """ + ... 
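+# Sketch of how ``float_power`` differs from ``pow``: the computation and result
+# are always double precision (or complex128), so integer bases with negative
+# exponents give exact fractional values instead of an error.
+import torch
+a = torch.arange(1, 5)          # int64 tensor: [1, 2, 3, 4]
+fp = torch.float_power(a, -2)   # float64: [1.0000, 0.2500, 0.1111, 0.0625]
+# torch.pow(a, -2) would raise here, because integers raised to negative integer
+# powers cannot be represented as integers; torch.pow(a.double(), -2) matches fp.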
+@overload +def float_power(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + float_power(input, exponent, *, out=None) -> Tensor + + Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision. + If neither input is complex returns a ``torch.float64`` tensor, + and if one or more inputs is complex returns a ``torch.complex128`` tensor. + + .. note:: + This function always computes in double precision, unlike :func:`torch.pow`, + which implements more typical :ref:`type promotion `. + This is useful when the computation needs to be performed in a wider or more precise dtype, + or the results of the computation may contain fractional values not representable in the input dtypes, + like when an integer base is raised to a negative integer exponent. + + Args: + input (Tensor or Number): the base value(s) + exponent (Tensor or Number): the exponent value(s) + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(10, (4,)) + >>> a + tensor([6, 4, 7, 1]) + >>> torch.float_power(a, 2) + tensor([36., 16., 49., 1.], dtype=torch.float64) + + >>> a = torch.arange(1, 5) + >>> a + tensor([ 1, 2, 3, 4]) + >>> exp = torch.tensor([2, -3, 4, -5]) + >>> exp + tensor([ 2, -3, 4, -5]) + >>> torch.float_power(a, exp) + tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64) + """ + ... +@overload +def float_power(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + float_power(input, exponent, *, out=None) -> Tensor + + Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision. + If neither input is complex returns a ``torch.float64`` tensor, + and if one or more inputs is complex returns a ``torch.complex128`` tensor. + + .. note:: + This function always computes in double precision, unlike :func:`torch.pow`, + which implements more typical :ref:`type promotion `. + This is useful when the computation needs to be performed in a wider or more precise dtype, + or the results of the computation may contain fractional values not representable in the input dtypes, + like when an integer base is raised to a negative integer exponent. + + Args: + input (Tensor or Number): the base value(s) + exponent (Tensor or Number): the exponent value(s) + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randint(10, (4,)) + >>> a + tensor([6, 4, 7, 1]) + >>> torch.float_power(a, 2) + tensor([36., 16., 49., 1.], dtype=torch.float64) + + >>> a = torch.arange(1, 5) + >>> a + tensor([ 1, 2, 3, 4]) + >>> exp = torch.tensor([2, -3, 4, -5]) + >>> exp + tensor([ 2, -3, 4, -5]) + >>> torch.float_power(a, exp) + tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64) + """ + ... +def floor(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + floor(input, *, out=None) -> Tensor + + Returns a new tensor with the floor of the elements of :attr:`input`, + the largest integer less than or equal to each element. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + + .. math:: + \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.8166, 1.5308, -0.2530, -0.2091]) + >>> torch.floor(a) + tensor([-1., 1., -1., -1.]) + """ + ... +def floor_(input: Tensor) -> Tensor: ... +def floor_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: + r""" + floor_divide(input, other, *, out=None) -> Tensor + + .. note:: + + Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed + truncation division. To restore the previous behavior use + :func:`torch.div` with ``rounding_mode='trunc'``. + + Computes :attr:`input` divided by :attr:`other`, elementwise, and floors + the result. + + .. math:: + \text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right) + + + + Supports broadcasting to a common shape, type promotion, and integer and float inputs. + + Args: + input (Tensor or Number): the dividend + other (Tensor or Number): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([4.0, 3.0]) + >>> b = torch.tensor([2.0, 2.0]) + >>> torch.floor_divide(a, b) + tensor([2.0, 1.0]) + >>> torch.floor_divide(a, 1.4) + tensor([2.0, 2.0]) + """ + ... +def fmax(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + fmax(input, other, *, out=None) -> Tensor + + Computes the element-wise maximum of :attr:`input` and :attr:`other`. + + This is like :func:`torch.maximum` except it handles NaNs differently: + if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum. + Only if both elements are NaN is NaN propagated. + + This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function. + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and floating-point inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')]) + >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')]) + >>> torch.fmax(a, b) + tensor([9.7000, 0.5000, 3.1000, nan]) + """ + ... +def fmin(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + fmin(input, other, *, out=None) -> Tensor + + Computes the element-wise minimum of :attr:`input` and :attr:`other`. + + This is like :func:`torch.minimum` except it handles NaNs differently: + if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum. + Only if both elements are NaN is NaN propagated. + + This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function. + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and floating-point inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')]) + >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')]) + >>> torch.fmin(a, b) + tensor([-9.3000, 0.1000, 2.1000, nan]) + """ + ... +@overload +def fmod(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + fmod(input, other, *, out=None) -> Tensor + + Applies C++'s `std::fmod `_ entrywise. 
+ The result has the same sign as the dividend :attr:`input` and its absolute value + is less than that of :attr:`other`. + + This function may be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + + When the divisor is zero, returns ``NaN`` for floating point dtypes + on both CPU and GPU; raises ``RuntimeError`` for integer division by + zero on CPU; Integer division by zero on GPU may return any value. + + .. note:: + + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + + .. seealso:: + + :func:`torch.remainder` which implements Python's modulus operator. + This one is defined using division rounding down the result. + + Args: + input (Tensor): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([-1., -0., -1., 1., 0., 1.]) + >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000]) + """ + ... +@overload +def fmod(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + fmod(input, other, *, out=None) -> Tensor + + Applies C++'s `std::fmod `_ entrywise. + The result has the same sign as the dividend :attr:`input` and its absolute value + is less than that of :attr:`other`. + + This function may be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + + When the divisor is zero, returns ``NaN`` for floating point dtypes + on both CPU and GPU; raises ``RuntimeError`` for integer division by + zero on CPU; Integer division by zero on GPU may return any value. + + .. note:: + + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + + .. seealso:: + + :func:`torch.remainder` which implements Python's modulus operator. + This one is defined using division rounding down the result. + + Args: + input (Tensor): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([-1., -0., -1., 1., 0., 1.]) + >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000]) + """ + ... +def frac(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + frac(input, *, out=None) -> Tensor + + Computes the fractional portion of each element in :attr:`input`. + + .. math:: + \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i}) + + Example:: + + >>> torch.frac(torch.tensor([1, 2.5, -3.2])) + tensor([ 0.0000, 0.5000, -0.2000]) + """ + ... +def frac_(input: Tensor) -> Tensor: ... 
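+# Sketch contrasting ``fmod`` (sign follows the dividend, truncated division)
+# with ``remainder`` (sign follows the divisor, floored division), and checking
+# the identity quoted in the docstring above.
+import torch
+a = torch.tensor([-3., -2., -1., 1., 2., 3.])
+trunc_mod = torch.fmod(a, 2)        # tensor([-1., -0., -1.,  1.,  0.,  1.])
+floor_mod = torch.remainder(a, 2)   # tensor([ 1.,  0.,  1.,  1.,  0.,  1.])
+check = a - a.div(2, rounding_mode="trunc") * 2
+# torch.equal(trunc_mod, check) is expected to be True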
+def frexp(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.frexp: + r""" + frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent) + + Decomposes :attr:`input` into mantissa and exponent tensors + such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`. + + The range of mantissa is the open interval (-1, 1). + + Supports float inputs. + + Args: + input (Tensor): the input tensor + + + Keyword args: + out (tuple, optional): the output tensors + + Example:: + + >>> x = torch.arange(9.) + >>> mantissa, exponent = torch.frexp(x) + >>> mantissa + tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000]) + >>> exponent + tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32) + >>> torch.ldexp(mantissa, exponent) + tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.]) + """ + ... +def frobenius_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def from_file(filename: str, shared: Optional[_bool] = None, size: Optional[_int] = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False) + + Creates a CPU tensor with a storage backed by a memory-mapped file. + + If ``shared`` is True, then memory is shared between processes. All changes are written to the file. + If ``shared`` is False, then changes to the tensor do not affect the file. + + ``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain + at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True`` the file will be created if needed. + + .. note:: + Only CPU tensors can be mapped to files. + + .. note:: + For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory. + + + Args: + filename (str): file name to map + shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the + underlying `mmap(2) call `_) + size (int): number of elements in the tensor + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + >>> t = torch.randn(2, 5, dtype=torch.float64) + >>> t.numpy().tofile('storage.pt') + >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64) + """ + ... +def from_numpy(ndarray) -> Tensor: + r""" + from_numpy(ndarray) -> Tensor + + Creates a :class:`Tensor` from a :class:`numpy.ndarray`. + + The returned tensor and :attr:`ndarray` share the same memory. Modifications to + the tensor will be reflected in the :attr:`ndarray` and vice versa. 
The returned + tensor is not resizable. + + It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``, + ``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``, + ``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``, + and ``bool``. + + .. warning:: + Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior. + + Example:: + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.from_numpy(a) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + """ + ... +def frombuffer(buffer: Any, *, dtype: _dtype, count: int = -1, offset: int = 0, requires_grad: _bool = False) -> Tensor: + r""" + frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor + + Creates a 1-dimensional :class:`Tensor` from an object that implements + the Python buffer protocol. + + Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of + the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count` + elements. + + Note that either of the following must be true: + + 1. :attr:`count` is a positive non-zero number, and the total number of bytes + in the buffer is more than :attr:`offset` plus :attr:`count` times the size + (in bytes) of :attr:`dtype`. + + 2. :attr:`count` is negative, and the length (number of bytes) of the buffer + subtracted by the :attr:`offset` is a multiple of the size (in bytes) of + :attr:`dtype`. + + The returned tensor and buffer share the same memory. Modifications to + the tensor will be reflected in the buffer and vice versa. The returned + tensor is not resizable. + + .. note:: + This function increments the reference count for the object that + owns the shared memory. Therefore, such memory will not be deallocated + before the returned tensor goes out of scope. + + .. warning:: + This function's behavior is undefined when passed an object implementing + the buffer protocol whose data is not on the CPU. Doing so is likely to + cause a segmentation fault. + + .. warning:: + This function does not try to infer the :attr:`dtype` (hence, it is not + optional). Passing a different :attr:`dtype` than its source may result + in unexpected behavior. + + Args: + buffer (object): a Python object that exposes the buffer interface. + + Keyword args: + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + count (int, optional): the number of desired elements to be read. + If negative, all the elements (until the end of the buffer) will be + read. Default: -1. + offset (int, optional): the number of bytes to skip at the start of + the buffer. Default: 0. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> import array + >>> a = array.array('i', [1, 2, 3]) + >>> t = torch.frombuffer(a, dtype=torch.int32) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + + >>> # Interprets the signed char bytes as 32-bit integers. + >>> # Each 4 signed char elements will be interpreted as + >>> # 1 signed 32-bit integer. + >>> import array + >>> a = array.array('b', [-1, 0, 0, 0]) + >>> torch.frombuffer(a, dtype=torch.int32) + tensor([255], dtype=torch.int32) + """ + ... 
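+# Sketch of ``frombuffer`` with ``count`` and ``offset``: skip the first int16
+# element (2 bytes) of the buffer, read the next two, and note that the tensor
+# aliases the buffer, so writes through either side are visible to the other.
+import array
+import torch
+buf = array.array('h', [10, 20, 30, 40])   # four signed 16-bit values
+t = torch.frombuffer(buf, dtype=torch.int16, count=2, offset=2)
+# t is tensor([20, 30], dtype=torch.int16)
+t[0] = -1
+# buf is now array('h', [10, -1, 30, 40]) because the memory is shared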
+@overload +def full(size: _size, fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The + tensor's dtype is inferred from :attr:`fill_value`. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) + """ + ... +@overload +def full(size: _size, fill_value: Union[Number, _complex], *, names: List[Union[str, None]], layout: _layout = strided, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The + tensor's dtype is inferred from :attr:`fill_value`. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) + """ + ... 
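+# Hedged sketch of the ``names`` overload above (named tensors are a prototype
+# feature in PyTorch, so this may emit a warning): the factory attaches names to
+# the new tensor's dimensions, which can then be addressed by name.
+import torch
+t = torch.full((2, 3), 1.0, names=('N', 'C'))
+# t.names is ('N', 'C') and t.size('C') is 3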
+@overload +def full(size: Sequence[Union[_int, SymInt]], fill_value: Union[Number, _complex], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The + tensor's dtype is inferred from :attr:`fill_value`. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) + """ + ... +@overload +def full(size: _size, fill_value: Union[Number, _complex], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The + tensor's dtype is inferred from :attr:`fill_value`. + + Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) + """ + ... 
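+# Sketch of the dtype inference described above: the output dtype follows the
+# fill value unless ``dtype`` is passed explicitly.
+import torch
+ints = torch.full((2, 2), 7)                          # inferred torch.int64
+floats = torch.full((2, 2), 7.0)                       # default float dtype (float32 unless changed)
+halves = torch.full((2, 2), 7, dtype=torch.float16)    # explicit dtype wins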
+def full_like(input: Tensor, fill_value: Union[Number, _complex], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + full_like(input, fill_value, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`. + ``torch.full_like(input, fill_value)`` is equivalent to + ``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + fill_value: the number to fill the output tensor with. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... +def fused_moving_avg_obs_fake_quant(input: Tensor, observer_on: Tensor, fake_quant_on: Tensor, running_min: Tensor, running_max: Tensor, scale: Tensor, zero_point: Tensor, averaging_const: _float, quant_min: _int, quant_max: _int, ch_axis: _int, per_row_fake_quant: _bool = False, symmetric_quant: _bool = False) -> Tensor: ... +@overload +def gather(input: Tensor, dim: _int, index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor + + Gathers values along an axis specified by `dim`. + + For a 3-D tensor the output is specified by:: + + out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0 + out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1 + out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2 + + :attr:`input` and :attr:`index` must have the same number of dimensions. + It is also required that ``index.size(d) <= input.size(d)`` for all + dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`. + Note that ``input`` and ``index`` do not broadcast against each other. + + Args: + input (Tensor): the source tensor + dim (int): the axis along which to index + index (LongTensor): the indices of elements to gather + + Keyword arguments: + sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor. + out (Tensor, optional): the destination tensor + + Example:: + + >>> t = torch.tensor([[1, 2], [3, 4]]) + >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]])) + tensor([[ 1, 1], + [ 4, 3]]) + """ + ... +@overload +def gather(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, sparse_grad: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor + + Gathers values along an axis specified by `dim`. 
+ + For a 3-D tensor the output is specified by:: + + out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0 + out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1 + out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2 + + :attr:`input` and :attr:`index` must have the same number of dimensions. + It is also required that ``index.size(d) <= input.size(d)`` for all + dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`. + Note that ``input`` and ``index`` do not broadcast against each other. + + Args: + input (Tensor): the source tensor + dim (int): the axis along which to index + index (LongTensor): the indices of elements to gather + + Keyword arguments: + sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor. + out (Tensor, optional): the destination tensor + + Example:: + + >>> t = torch.tensor([[1, 2], [3, 4]]) + >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]])) + tensor([[ 1, 1], + [ 4, 3]]) + """ + ... +def gcd(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + gcd(input, other, *, out=None) -> Tensor + + Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`. + + Both :attr:`input` and :attr:`other` must have integer types. + + .. note:: + This defines :math:`gcd(0, 0) = 0`. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([5, 10, 15]) + >>> b = torch.tensor([3, 4, 5]) + >>> torch.gcd(a, b) + tensor([1, 2, 5]) + >>> c = torch.tensor([3]) + >>> torch.gcd(a, c) + tensor([1, 1, 3]) + """ + ... +def gcd_(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def ge(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ge(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \geq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, True], [False, True]]) + """ + ... +@overload +def ge(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + ge(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \geq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, True], [False, True]]) + """ + ... 
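+# Sketch of ``gather`` along dim=0, complementing the dim=1 example above: for a
+# 2-D input the rule reduces to out[i][j] = input[index[i][j]][j].
+import torch
+src = torch.tensor([[1, 2], [3, 4]])
+idx = torch.tensor([[1, 0], [0, 0]])
+out = torch.gather(src, 0, idx)
+# out[0][0] = src[idx[0][0]][0] = src[1][0] = 3, and in full:
+# out is tensor([[3, 2], [1, 2]])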
+def geqrf(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.geqrf: + r""" + geqrf(input, *, out=None) -> (Tensor, Tensor) + + This is a low-level function for calling LAPACK's geqrf directly. This function + returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ . + + Computes a QR decomposition of :attr:`input`. + Both `Q` and `R` matrices are stored in the same output tensor `a`. + The elements of `R` are stored on and above the diagonal. + Elementary reflectors (or Householder vectors) implicitly defining matrix `Q` + are stored below the diagonal. + The results of this function can be used together with :func:`torch.linalg.householder_product` + to obtain the `Q` matrix or + with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix, + for an efficient matrix-matrix multiplication. + + See `LAPACK documentation for geqrf`_ for further details. + + .. note:: + See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq` + with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition. + + Args: + input (Tensor): the input matrix + + Keyword args: + out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`. + + .. _LAPACK documentation for geqrf: + http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html + """ + ... +def ger(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ger(input, vec2, *, out=None) -> Tensor + + Alias of :func:`torch.outer`. + + .. warning:: + This function is deprecated and will be removed in a future PyTorch release. + Use :func:`torch.outer` instead. + """ + ... +def get_default_dtype() -> _dtype: + r""" + get_default_dtype() -> torch.dtype + + Get the current default floating point :class:`torch.dtype`. + + Example:: + + >>> torch.get_default_dtype() # initial default for floating point is torch.float32 + torch.float32 + >>> torch.set_default_dtype(torch.float64) + >>> torch.get_default_dtype() # default is now changed to torch.float64 + torch.float64 + """ + ... +def get_num_interop_threads() -> _int: + r""" + get_num_interop_threads() -> int + + Returns the number of threads used for inter-op parallelism on CPU + (e.g. in JIT interpreter) + """ + ... +def get_num_threads() -> _int: + r""" + get_num_threads() -> int + + Returns the number of threads used for parallelizing CPU operations + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Optional[Union[Number, _complex]] = None, dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. 
+ + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. 
+ >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. 
Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. 
For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Sequence[Union[Number, _complex]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: Optional[_int] = None, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Union[Number, _complex], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, spacing: Union[Tuple[Tensor, ...], List[Tensor]], dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def gradient(input: Tensor, *, dim: _size, edge_order: _int = 1) -> Tuple[Tensor, ...]: + r""" + gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + + Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in + one or more dimensions using the `second-order accurate central differences method + `_ and + either first or second order estimates at the boundaries. + + The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not + specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates + to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional + :attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and + :math:`g(1, 2, 3)\ == input[1, 2, 3]`. + + When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. + This is detailed in the "Keyword Arguments" section below. + + The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is + accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be + improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative + is estimated using `Taylor’s theorem with remainder `_. + Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring + it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + + .. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + + Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + + .. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + + .. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + + The value of each partial derivative at the boundary points is computed differently. See edge_order below. + + Args: + input (``Tensor``): the tensor that represents the values of the function + + Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + + Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + """ + ... +@overload +def greater(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + greater(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.gt`. + """ + ... +@overload +def greater(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + greater(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.gt`. + """ + ... +@overload +def greater_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + greater_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.ge`. + """ + ... +@overload +def greater_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + greater_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.ge`. + """ + ... +def grid_sampler(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def grid_sampler_2d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def grid_sampler_3d(input: Tensor, grid: Tensor, interpolation_mode: _int, padding_mode: _int, align_corners: _bool) -> Tensor: ... +def group_norm(input: Tensor, num_groups: _int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enabled: _bool = True) -> Tensor: ... +@overload +def gru(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def gru(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... 
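+# --- illustrative sketch (not part of the upstream stub) ---
+# A hedged example of the ``edge_order`` keyword documented in the
+# ``gradient()`` docstrings above: for the quadratic f(x) = x**2 sampled at
+# non-uniform coordinates, the second-order boundary estimates should be
+# exact, while the default first-order estimates are not. Assumes ``torch``
+# is importable; kept as a comment so the stub file stays declaration-only.
+#
+#     import torch
+#     coords = (torch.tensor([-2., -1., 1., 4.]),)
+#     values = torch.tensor([4., 1., 1., 16.])
+#     (grad_first,)  = torch.gradient(values, spacing=coords)                 # edge_order=1 (default)
+#     (grad_second,) = torch.gradient(values, spacing=coords, edge_order=2)
+#     # grad_second should match the analytic derivative 2*x = [-4., -2., 2., 8.],
+#     # whereas grad_first differs at the two boundary points.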
+def gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ... +@overload +def gt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + gt(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} > \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere + + Example:: + + >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [False, False]]) + """ + ... +@overload +def gt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + gt(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} > \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere + + Example:: + + >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [False, False]]) + """ + ... +@overload +def hamming_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hamming window function. + + .. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hamming_window(L, periodic=True)`` equal to + ``torch.hamming_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + .. note:: + This is a generalized version of :meth:`torch.hann_window`. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). 
Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window. + """ + ... +@overload +def hamming_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hamming window function. + + .. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hamming_window(L, periodic=True)`` equal to + ``torch.hamming_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + .. note:: + This is a generalized version of :meth:`torch.hann_window`. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window. + """ + ... 
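+# --- illustrative sketch (not part of the upstream stub) ---
+# A hedged check of the periodic/symmetric relationship stated in the
+# ``hamming_window()`` docstrings above; assumes ``torch`` is importable and
+# is kept as a comment so the stub file stays declaration-only.
+#
+#     import torch
+#     L = 8
+#     periodic_win  = torch.hamming_window(L, periodic=True)
+#     symmetric_win = torch.hamming_window(L + 1, periodic=False)[:-1]
+#     assert torch.allclose(periodic_win, symmetric_win)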
+@overload +def hamming_window(window_length: _int, periodic: _bool, alpha: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hamming window function. + + .. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hamming_window(L, periodic=True)`` equal to + ``torch.hamming_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + .. note:: + This is a generalized version of :meth:`torch.hann_window`. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window. + """ + ... +@overload +def hamming_window(window_length: _int, periodic: _bool, alpha: _float, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hamming window function. + + .. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. 
Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hamming_window(L, periodic=True)`` equal to + ``torch.hamming_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + .. note:: + This is a generalized version of :meth:`torch.hann_window`. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window. + """ + ... +@overload +def hann_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hann window function. + + .. math:: + w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] = + \sin^2 \left( \frac{\pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hann_window(L, periodic=True)`` equal to + ``torch.hann_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. 
+ device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +@overload +def hann_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + hann_window(window_length, periodic=True, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Hann window function. + + .. math:: + w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] = + \sin^2 \left( \frac{\pi n}{N - 1} \right), + + where :math:`N` is the full window size. + + The input :attr:`window_length` is a positive integer controlling the + returned window size. :attr:`periodic` flag determines whether the returned + window trims off the last duplicate value from the symmetric window and is + ready to be used as a periodic window with functions like + :meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in + above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have + ``torch.hann_window(L, periodic=True)`` equal to + ``torch.hann_window(L + 1, periodic=False)[:-1])``. + + .. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + + Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Returns: + Tensor: A 1-D tensor of size :math:`(\text{window\_length},)` containing the window + """ + ... +def hardshrink(input: Tensor, lambd: Union[Number, _complex] = 0.5, *, out: Optional[Tensor] = None) -> Tensor: ... +def heaviside(input: Tensor, values: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + heaviside(input, values, *, out=None) -> Tensor + + Computes the Heaviside step function for each element in :attr:`input`. + The Heaviside step function is defined as: + + .. math:: + \text{{heaviside}}(input, values) = \begin{cases} + 0, & \text{if input < 0}\\ + values, & \text{if input == 0}\\ + 1, & \text{if input > 0} + \end{cases} + + + Args: + input (Tensor): the input tensor. 
+ values (Tensor): The values to use where :attr:`input` is zero. + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> input = torch.tensor([-1.5, 0, 2.0]) + >>> values = torch.tensor([0.5]) + >>> torch.heaviside(input, values) + tensor([0.0000, 0.5000, 1.0000]) + >>> values = torch.tensor([1.2, -2.0, 3.5]) + >>> torch.heaviside(input, values) + tensor([0., -2., 1.]) + """ + ... +def hinge_embedding_loss(input: Tensor, target: Tensor, margin: _float = 1.0, reduction: _int = 1) -> Tensor: ... +def histc(input: Tensor, bins: _int = 100, min: Union[Number, _complex] = 0, max: Union[Number, _complex] = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor + + Computes the histogram of a tensor. + + The elements are sorted into equal width bins between :attr:`min` and + :attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and + maximum values of the data are used. + + Elements lower than min and higher than max and ``NaN`` elements are ignored. + + Args: + input (Tensor): the input tensor. + bins (int): number of histogram bins + min (Scalar): lower end of the range (inclusive) + max (Scalar): upper end of the range (inclusive) + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: Histogram represented as a tensor + + Example:: + + >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3) + tensor([ 0., 2., 1., 0.]) + """ + ... +@overload +def histogram(input: Tensor, bins: Tensor, *, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram: + r""" + histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor) + + Computes a histogram of the values in a tensor. + + :attr:`bins` can be an integer or a 1D tensor. + + If :attr:`bins` is an int, it specifies the number of equal-width bins. + By default, the lower and upper range of the bins is determined by the + minimum and maximum elements of the input tensor. The :attr:`range` + argument can be provided to specify a range for the bins. + + If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges + including the rightmost edge. It should contain at least 2 elements + and its elements should be increasing. + + Args: + input (Tensor): the input tensor. + bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor, + defines the sequence of bin edges including the rightmost edge. + + Keyword args: + range (tuple of float): Defines the range of the bins. + weight (Tensor): If provided, weight should have the same shape as input. Each value in + input contributes its associated weight towards its bin's result. + density (bool): If False, the result will contain the count (or total weight) in each bin. + If True, the result is the value of the probability density function over the bins, + normalized such that the integral over the range of the bins is 1. + out (Tensor, optional): the output tensor. (tuple, optional): The result tuple of two output tensors (hist, bin_edges). + + Returns: + hist (Tensor): 1D Tensor containing the values of the histogram. + bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins. 
+ + Example:: + + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.])) + (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True) + (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + """ + ... +@overload +def histogram(input: Tensor, bins: _int = 100, *, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.histogram: + r""" + histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor) + + Computes a histogram of the values in a tensor. + + :attr:`bins` can be an integer or a 1D tensor. + + If :attr:`bins` is an int, it specifies the number of equal-width bins. + By default, the lower and upper range of the bins is determined by the + minimum and maximum elements of the input tensor. The :attr:`range` + argument can be provided to specify a range for the bins. + + If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges + including the rightmost edge. It should contain at least 2 elements + and its elements should be increasing. + + Args: + input (Tensor): the input tensor. + bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor, + defines the sequence of bin edges including the rightmost edge. + + Keyword args: + range (tuple of float): Defines the range of the bins. + weight (Tensor): If provided, weight should have the same shape as input. Each value in + input contributes its associated weight towards its bin's result. + density (bool): If False, the result will contain the count (or total weight) in each bin. + If True, the result is the value of the probability density function over the bins, + normalized such that the integral over the range of the bins is 1. + out (Tensor, optional): the output tensor. (tuple, optional): The result tuple of two output tensors (hist, bin_edges). + + Returns: + hist (Tensor): 1D Tensor containing the values of the histogram. + bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins. + + Example:: + + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.])) + (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True) + (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + """ + ... +@overload +def histogramdd(input: Tensor, bins: _int, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: + r""" + histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[]) + + Computes a multi-dimensional histogram of the values in a tensor. + + Interprets the elements of an input tensor whose innermost dimension has size N + as a collection of N-dimensional points. Maps each of the points into a set of + N-dimensional bins and returns the number of points (or total weight) in each bin. + + :attr:`input` must be a tensor with at least 2 dimensions. + If input has shape (M, N), each of its M rows defines a point in N-dimensional space. + If input has three or more dimensions, all but the last dimension are flattened. 
+ + Each dimension is independently associated with its own strictly increasing sequence + of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D + tensors. Alternatively, bin edges may be constructed automatically by passing a + sequence of integers specifying the number of equal-width bins in each dimension. + + For each N-dimensional point in input: + - Each of its coordinates is binned independently among the bin edges + corresponding to its dimension + - Binning results are combined to identify the N-dimensional bin (if any) + into which the point falls + - If the point falls into a bin, the bin's count (or total weight) is incremented + - Points which do not fall into any bin do not contribute to the output + + :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int. + + If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences + of bin edges. Each 1D tensor should contain a strictly increasing sequence with at + least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying + the left and right edges of all bins. Every bin is exclusive of its left edge. Only + the rightmost bin is inclusive of its right edge. + + If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins + in each dimension. By default, the leftmost and rightmost bin edges in each dimension + are determined by the minimum and maximum elements of the input tensor in the + corresponding dimension. The :attr:`range` argument can be provided to manually + specify the leftmost and rightmost bin edges in each dimension. + + If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions. + + .. note:: + See also :func:`torch.histogram`, which specifically computes 1D histograms. + While :func:`torch.histogramdd` infers the dimensionality of its bins and + binned values from the shape of :attr:`input`, :func:`torch.histogram` + accepts and flattens :attr:`input` of any shape. + + Args: + input (Tensor): the input tensor. + bins: Tensor[], int[], or int. + If Tensor[], defines the sequences of bin edges. + If int[], defines the number of equal-width bins in each dimension. + If int, defines the number of equal-width bins for all dimensions. + Keyword args: + range (sequence of float): Defines the leftmost and rightmost bin edges + in each dimension. + weight (Tensor): By default, each value in the input has weight 1. If a weight + tensor is passed, each N-dimensional coordinate in input + contributes its associated weight towards its bin's result. + The weight tensor should have the same shape as the :attr:`input` + tensor excluding its innermost dimension N. + density (bool): If False (default), the result will contain the count (or total weight) + in each bin. If True, each count (weight) is divided by the total count + (total weight), then divided by the volume of its associated bin. + Returns: + hist (Tensor): N-dimensional Tensor containing the values of the histogram. + bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges. + + Example:: + >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3], + ... 
weight=torch.tensor([1., 2., 4., 8.])) + torch.return_types.histogramdd( + hist=tensor([[0., 1., 0.], + [2., 0., 0.], + [4., 0., 8.]]), + bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]), + tensor([0.0000, 0.6667, 1.3333, 2.0000]))) + + >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2], + ... range=[0., 1., 0., 1.], density=True) + torch.return_types.histogramdd( + hist=tensor([[2., 0.], + [0., 2.]]), + bin_edges=(tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]))) + """ + ... +@overload +def histogramdd(input: Tensor, bins: _size, range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: + r""" + histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[]) + + Computes a multi-dimensional histogram of the values in a tensor. + + Interprets the elements of an input tensor whose innermost dimension has size N + as a collection of N-dimensional points. Maps each of the points into a set of + N-dimensional bins and returns the number of points (or total weight) in each bin. + + :attr:`input` must be a tensor with at least 2 dimensions. + If input has shape (M, N), each of its M rows defines a point in N-dimensional space. + If input has three or more dimensions, all but the last dimension are flattened. + + Each dimension is independently associated with its own strictly increasing sequence + of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D + tensors. Alternatively, bin edges may be constructed automatically by passing a + sequence of integers specifying the number of equal-width bins in each dimension. + + For each N-dimensional point in input: + - Each of its coordinates is binned independently among the bin edges + corresponding to its dimension + - Binning results are combined to identify the N-dimensional bin (if any) + into which the point falls + - If the point falls into a bin, the bin's count (or total weight) is incremented + - Points which do not fall into any bin do not contribute to the output + + :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int. + + If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences + of bin edges. Each 1D tensor should contain a strictly increasing sequence with at + least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying + the left and right edges of all bins. Every bin is exclusive of its left edge. Only + the rightmost bin is inclusive of its right edge. + + If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins + in each dimension. By default, the leftmost and rightmost bin edges in each dimension + are determined by the minimum and maximum elements of the input tensor in the + corresponding dimension. The :attr:`range` argument can be provided to manually + specify the leftmost and rightmost bin edges in each dimension. + + If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions. + + .. note:: + See also :func:`torch.histogram`, which specifically computes 1D histograms. + While :func:`torch.histogramdd` infers the dimensionality of its bins and + binned values from the shape of :attr:`input`, :func:`torch.histogram` + accepts and flattens :attr:`input` of any shape. + + Args: + input (Tensor): the input tensor. + bins: Tensor[], int[], or int. + If Tensor[], defines the sequences of bin edges. 
+ If int[], defines the number of equal-width bins in each dimension. + If int, defines the number of equal-width bins for all dimensions. + Keyword args: + range (sequence of float): Defines the leftmost and rightmost bin edges + in each dimension. + weight (Tensor): By default, each value in the input has weight 1. If a weight + tensor is passed, each N-dimensional coordinate in input + contributes its associated weight towards its bin's result. + The weight tensor should have the same shape as the :attr:`input` + tensor excluding its innermost dimension N. + density (bool): If False (default), the result will contain the count (or total weight) + in each bin. If True, each count (weight) is divided by the total count + (total weight), then divided by the volume of its associated bin. + Returns: + hist (Tensor): N-dimensional Tensor containing the values of the histogram. + bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges. + + Example:: + >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3], + ... weight=torch.tensor([1., 2., 4., 8.])) + torch.return_types.histogramdd( + hist=tensor([[0., 1., 0.], + [2., 0., 0.], + [4., 0., 8.]]), + bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]), + tensor([0.0000, 0.6667, 1.3333, 2.0000]))) + + >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2], + ... range=[0., 1., 0., 1.], density=True) + torch.return_types.histogramdd( + hist=tensor([[2., 0.], + [0., 2.]]), + bin_edges=(tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]))) + """ + ... +@overload +def histogramdd(input: Tensor, bins: Union[Tuple[Tensor, ...], List[Tensor]], range: Optional[Sequence[_float]] = None, weight: Optional[Tensor] = None, density: _bool = False) -> torch.return_types.histogramdd: + r""" + histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[]) + + Computes a multi-dimensional histogram of the values in a tensor. + + Interprets the elements of an input tensor whose innermost dimension has size N + as a collection of N-dimensional points. Maps each of the points into a set of + N-dimensional bins and returns the number of points (or total weight) in each bin. + + :attr:`input` must be a tensor with at least 2 dimensions. + If input has shape (M, N), each of its M rows defines a point in N-dimensional space. + If input has three or more dimensions, all but the last dimension are flattened. + + Each dimension is independently associated with its own strictly increasing sequence + of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D + tensors. Alternatively, bin edges may be constructed automatically by passing a + sequence of integers specifying the number of equal-width bins in each dimension. + + For each N-dimensional point in input: + - Each of its coordinates is binned independently among the bin edges + corresponding to its dimension + - Binning results are combined to identify the N-dimensional bin (if any) + into which the point falls + - If the point falls into a bin, the bin's count (or total weight) is incremented + - Points which do not fall into any bin do not contribute to the output + + :attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int. + + If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences + of bin edges. Each 1D tensor should contain a strictly increasing sequence with at + least one element. 
A sequence of K bin edges defines K-1 bins, explicitly specifying + the left and right edges of all bins. Every bin is exclusive of its left edge. Only + the rightmost bin is inclusive of its right edge. + + If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins + in each dimension. By default, the leftmost and rightmost bin edges in each dimension + are determined by the minimum and maximum elements of the input tensor in the + corresponding dimension. The :attr:`range` argument can be provided to manually + specify the leftmost and rightmost bin edges in each dimension. + + If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions. + + .. note:: + See also :func:`torch.histogram`, which specifically computes 1D histograms. + While :func:`torch.histogramdd` infers the dimensionality of its bins and + binned values from the shape of :attr:`input`, :func:`torch.histogram` + accepts and flattens :attr:`input` of any shape. + + Args: + input (Tensor): the input tensor. + bins: Tensor[], int[], or int. + If Tensor[], defines the sequences of bin edges. + If int[], defines the number of equal-width bins in each dimension. + If int, defines the number of equal-width bins for all dimensions. + Keyword args: + range (sequence of float): Defines the leftmost and rightmost bin edges + in each dimension. + weight (Tensor): By default, each value in the input has weight 1. If a weight + tensor is passed, each N-dimensional coordinate in input + contributes its associated weight towards its bin's result. + The weight tensor should have the same shape as the :attr:`input` + tensor excluding its innermost dimension N. + density (bool): If False (default), the result will contain the count (or total weight) + in each bin. If True, each count (weight) is divided by the total count + (total weight), then divided by the volume of its associated bin. + Returns: + hist (Tensor): N-dimensional Tensor containing the values of the histogram. + bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges. + + Example:: + >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3], + ... weight=torch.tensor([1., 2., 4., 8.])) + torch.return_types.histogramdd( + hist=tensor([[0., 1., 0.], + [2., 0., 0.], + [4., 0., 8.]]), + bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]), + tensor([0.0000, 0.6667, 1.3333, 2.0000]))) + + >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2], + ... range=[0., 1., 0., 1.], density=True) + torch.return_types.histogramdd( + hist=tensor([[2., 0.], + [0., 2.]]), + bin_edges=(tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]))) + """ + ... +def hsmm(input: Tensor, mat2: Tensor) -> Tensor: ... +@overload +def hsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]: + r""" + hsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors + horizontally according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. 
+ + If :attr:`input` is one dimensional this is equivalent to calling + torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is + zero), and if :attr:`input` has two or more dimensions it's equivalent to calling + torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), + except that if :attr:`indices_or_sections` is an integer it must evenly divide + the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.hsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + + Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.hsplit(t, 2) + (tensor([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + tensor([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])) + >>> torch.hsplit(t, [3, 6]) + (tensor([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + tensor([[ 3.], + [ 7.], + [11.], + [15.]]), + tensor([], size=(4, 0))) + """ + ... +@overload +def hsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]: + r""" + hsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors + horizontally according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + If :attr:`input` is one dimensional this is equivalent to calling + torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is + zero), and if :attr:`input` has two or more dimensions it's equivalent to calling + torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), + except that if :attr:`indices_or_sections` is an integer it must evenly divide + the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.hsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + + Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.hsplit(t, 2) + (tensor([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + tensor([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])) + >>> torch.hsplit(t, [3, 6]) + (tensor([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + tensor([[ 3.], + [ 7.], + [11.], + [15.]]), + tensor([], size=(4, 0))) + """ + ... +def hspmm(mat1: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + hspmm(mat1, mat2, *, out=None) -> Tensor + + Performs a matrix multiplication of a :ref:`sparse COO matrix + ` :attr:`mat1` and a strided matrix :attr:`mat2`. The + result is a (1 + 1)-dimensional :ref:`hybrid COO matrix + `. + + Args: + mat1 (Tensor): the first sparse matrix to be matrix multiplied + mat2 (Tensor): the second strided matrix to be matrix multiplied + + Keyword args: + out (Tensor, optional): the output tensor. + """ + ... +def hstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + hstack(tensors, *, out=None) -> Tensor + + Stack tensors in sequence horizontally (column wise). + + This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors. 
+ + Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.hstack((a,b)) + tensor([1, 2, 3, 4, 5, 6]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.hstack((a,b)) + tensor([[1, 4], + [2, 5], + [3, 6]]) + """ + ... +def hypot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + hypot(input, other, *, out=None) -> Tensor + + Given the legs of a right triangle, return its hypotenuse. + + .. math:: + \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}} + + The shapes of ``input`` and ``other`` must be + :ref:`broadcastable `. + + Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])) + tensor([5.0000, 5.6569, 6.4031]) + """ + ... +def i0(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + i0(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.i0`. + """ + ... +def i0_(input: Tensor) -> Tensor: ... +def igamma(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + igamma(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.special.gammainc`. + """ + ... +def igammac(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + igammac(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.special.gammaincc`. + """ + ... +def imag(input: Tensor) -> Tensor: + r""" + imag(input) -> Tensor + + Returns a new tensor containing imaginary values of the :attr:`self` tensor. + The returned tensor and :attr:`self` share the same underlying storage. + + .. warning:: + :func:`imag` is only supported for tensors with complex dtypes. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) + >>> x.imag + tensor([ 0.3553, -0.7896, -0.0633, -0.8119]) + """ + ... +@overload +def index_add(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor + + See :meth:`~Tensor.index_add_` for function description. + """ + ... +@overload +def index_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor + + See :meth:`~Tensor.index_add_` for function description. + """ + ... +@overload +def index_copy(input: Tensor, dim: _int, index: Tensor, source: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + index_copy(input, dim, index, source, *, out=None) -> Tensor + + See :meth:`~Tensor.index_add_` for function description. + """ + ... +@overload +def index_copy(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, source: Tensor) -> Tensor: + r""" + index_copy(input, dim, index, source, *, out=None) -> Tensor + + See :meth:`~Tensor.index_add_` for function description. + """ + ... +@overload +def index_fill(input: Tensor, dim: _int, index: Tensor, value: Tensor) -> Tensor: ... 
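The ``index_add`` / ``index_copy`` / ``index_fill`` stubs above and below only point to the in-place :class:`Tensor` methods for their descriptions, so a brief usage sketch of the functional forms may help. The calls and printed values below are illustrative only (not part of the generated stub) and assume default CPU float tensors::

    >>> x = torch.zeros(3, 4)
    >>> src = torch.arange(8, dtype=torch.float).reshape(2, 4)
    >>> idx = torch.tensor([0, 2])
    >>> torch.index_add(x, 0, idx, src, alpha=2)   # rows 0 and 2 get 2 * src added
    tensor([[ 0.,  2.,  4.,  6.],
            [ 0.,  0.,  0.,  0.],
            [ 8., 10., 12., 14.]])
    >>> torch.index_copy(x, 0, idx, src)           # rows 0 and 2 are overwritten by src
    tensor([[0., 1., 2., 3.],
            [0., 0., 0., 0.],
            [4., 5., 6., 7.]])
    >>> torch.index_fill(x, 1, idx, -1.)           # columns 0 and 2 are filled with -1
    tensor([[-1.,  0., -1.,  0.],
            [-1.,  0., -1.,  0.],
            [-1.,  0., -1.,  0.]])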
+@overload +def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Tensor) -> Tensor: ... +@overload +def index_fill(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex]) -> Tensor: ... +@overload +def index_fill(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: ... +def index_put(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... +def index_put_(input: Tensor, indices: Optional[Union[Tuple[Tensor, ...], List[Tensor]]], values: Tensor, accumulate: _bool = False) -> Tensor: ... +def index_reduce(input: Tensor, dim: _int, index: Tensor, source: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor: + r""" + index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor + + See :meth:`~Tensor.index_reduce_` for function description. + """ + ... +@overload +def index_select(input: Tensor, dim: _int, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + index_select(input, dim, index, *, out=None) -> Tensor + + Returns a new tensor which indexes the :attr:`input` tensor along dimension + :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`. + + The returned tensor has the same number of dimensions as the original tensor + (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length + of :attr:`index`; other dimensions have the same size as in the original tensor. + + .. note:: The returned tensor does **not** use the same storage as the original + tensor. If :attr:`out` has a different shape than expected, we + silently change it to the correct shape, reallocating the underlying + storage if necessary. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension in which we index + index (IntTensor or LongTensor): the 1-D tensor containing the indices to index + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-0.4664, 0.2647, -0.1228, -1.1068], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> indices = torch.tensor([0, 2]) + >>> torch.index_select(x, 0, indices) + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> torch.index_select(x, 1, indices) + tensor([[ 0.1427, -0.5414], + [-0.4664, -0.1228], + [-1.1734, 0.7230]]) + """ + ... +@overload +def index_select(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + index_select(input, dim, index, *, out=None) -> Tensor + + Returns a new tensor which indexes the :attr:`input` tensor along dimension + :attr:`dim` using the entries in :attr:`index` which is a `LongTensor`. + + The returned tensor has the same number of dimensions as the original tensor + (:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length + of :attr:`index`; other dimensions have the same size as in the original tensor. + + .. note:: The returned tensor does **not** use the same storage as the original + tensor. If :attr:`out` has a different shape than expected, we + silently change it to the correct shape, reallocating the underlying + storage if necessary. + + Args: + input (Tensor): the input tensor. 
+ dim (int): the dimension in which we index + index (IntTensor or LongTensor): the 1-D tensor containing the indices to index + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-0.4664, 0.2647, -0.1228, -1.1068], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> indices = torch.tensor([0, 2]) + >>> torch.index_select(x, 0, indices) + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> torch.index_select(x, 1, indices) + tensor([[ 0.1427, -0.5414], + [-0.4664, -0.1228], + [-1.1734, 0.7230]]) + """ + ... +def indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.indices`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def init_num_threads() -> None: ... +def inner(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + inner(input, other, *, out=None) -> Tensor + + Computes the dot product for 1D tensors. For higher dimensions, sums the product + of elements from :attr:`input` and :attr:`other` along their last dimension. + + .. note:: + + If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent + to `torch.mul(input, other)`. + + If both :attr:`input` and :attr:`other` are non-scalars, the size of their last + dimension must match and the result is equivalent to `torch.tensordot(input, + other, dims=([-1], [-1]))` + + Args: + input (Tensor): First input tensor + other (Tensor): Second input tensor + + Keyword args: + out (Tensor, optional): Optional output tensor to write result into. The output + shape is `input.shape[:-1] + other.shape[:-1]`. + + Example:: + + # Dot product + >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1])) + tensor(7) + + # Multidimensional input tensors + >>> a = torch.randn(2, 3) + >>> a + tensor([[0.8173, 1.0874, 1.1784], + [0.3279, 0.1234, 2.7894]]) + >>> b = torch.randn(2, 4, 3) + >>> b + tensor([[[-0.4682, -0.7159, 0.1506], + [ 0.4034, -0.3657, 1.0387], + [ 0.9892, -0.6684, 0.1774], + [ 0.9482, 1.3261, 0.3917]], + + [[ 0.4537, 0.7493, 1.1724], + [ 0.2291, 0.5749, -0.2267], + [-0.7920, 0.3607, -0.3701], + [ 1.3666, -0.5850, -1.7242]]]) + >>> torch.inner(a, b) + tensor([[[-0.9837, 1.1560, 0.2907, 2.6785], + [ 2.5671, 0.5452, -0.6912, -1.5509]], + + [[ 0.1782, 2.9843, 0.7366, 1.5672], + [ 3.5115, -0.4864, -1.2476, -4.4337]]]) + + # Scalar input + >>> torch.inner(a, torch.tensor(2)) + tensor([[1.6347, 2.1748, 2.3567], + [0.6558, 0.2469, 5.5787]]) + """ + ... +def instance_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], use_input_stats: _bool, momentum: _float, eps: _float, cudnn_enabled: _bool) -> Tensor: ... +def int_repr(input: Tensor) -> Tensor: ... +def inverse(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + inverse(input, *, out=None) -> Tensor + + Alias for :func:`torch.linalg.inv` + """ + ... +def is_complex(input: Tensor) -> _bool: + r""" + is_complex(input) -> (bool) + + Returns True if the data type of :attr:`input` is a complex data type i.e., + one of ``torch.complex64``, and ``torch.complex128``. + + Args: + input (Tensor): the input tensor. + """ + ... +def is_conj(input: Tensor) -> _bool: + r""" + is_conj(input) -> (bool) + + Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`. 
+ + Args: + input (Tensor): the input tensor. + """ + ... +def is_distributed(input: Tensor) -> _bool: ... +def is_floating_point(input: Tensor) -> _bool: + r""" + is_floating_point(input) -> (bool) + + Returns True if the data type of :attr:`input` is a floating point data type i.e., + one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``. + + Args: + input (Tensor): the input tensor. + """ + ... +def is_grad_enabled() -> _bool: + r""" + is_grad_enabled() -> (bool) + + Returns True if grad mode is currently enabled. + """ + ... +def is_inference(input: Tensor) -> _bool: + r""" + is_inference(input) -> (bool) + + Returns True if :attr:`input` is an inference tensor. + + A non-view tensor is an inference tensor if and only if it was + allocated during inference mode. A view tensor is an inference + tensor if and only if the tensor it is a view of is an inference tensor. + + For details on inference mode please see + `Inference Mode `_. + + Args: + input (Tensor): the input tensor. + """ + ... +def is_inference_mode_enabled() -> _bool: + r""" + is_inference_mode_enabled() -> (bool) + + Returns True if inference mode is currently enabled. + """ + ... +def is_neg(input: Tensor) -> _bool: ... +def is_nonzero(input: Tensor) -> _bool: + r""" + is_nonzero(input) -> (bool) + + Returns True if the :attr:`input` is a single element tensor which is not equal to zero + after type conversions. + i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or + ``torch.tensor([False])``. + Throws a ``RuntimeError`` if ``torch.numel() != 1`` (even in case + of sparse tensors). + + Args: + input (Tensor): the input tensor. + + Examples:: + + >>> torch.is_nonzero(torch.tensor([0.])) + False + >>> torch.is_nonzero(torch.tensor([1.5])) + True + >>> torch.is_nonzero(torch.tensor([False])) + False + >>> torch.is_nonzero(torch.tensor([3])) + True + >>> torch.is_nonzero(torch.tensor([1, 3, 5])) + Traceback (most recent call last): + ... + RuntimeError: bool value of Tensor with more than one value is ambiguous + >>> torch.is_nonzero(torch.tensor([])) + Traceback (most recent call last): + ... + RuntimeError: bool value of Tensor with no values is ambiguous + """ + ... +def is_same_size(input: Tensor, other: Tensor) -> _bool: ... +def is_signed(input: Tensor) -> _bool: ... +def is_vulkan_available() -> _bool: ... +def isclose(input: Tensor, other: Tensor, rtol: _float = 1e-05, atol: _float = 1e-08, equal_nan: _bool = False) -> Tensor: + r""" + isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor + + Returns a new tensor with boolean elements representing if each element of + :attr:`input` is "close" to the corresponding element of :attr:`other`. + Closeness is defined as: + + .. math:: + \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert + + + where :attr:`input` and :attr:`other` are finite. Where :attr:`input` + and/or :attr:`other` are nonfinite they are close if and only if + they are equal, with NaNs being considered equal to each other when + :attr:`equal_nan` is True. + + Args: + input (Tensor): first tensor to compare + other (Tensor): second tensor to compare + atol (float, optional): absolute tolerance. Default: 1e-08 + rtol (float, optional): relative tolerance. Default: 1e-05 + equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. 
Default: ``False`` + + Examples:: + + >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4))) + tensor([ True, False, False]) + >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5) + tensor([True, True]) + """ + ... +def isfinite(input: Tensor) -> Tensor: + r""" + isfinite(input) -> Tensor + + Returns a new tensor with boolean elements representing if each element is `finite` or not. + + Real values are finite when they are not NaN, negative infinity, or infinity. + Complex values are finite when both their real and imaginary parts are finite. + + Args: + input (Tensor): the input tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is finite and False elsewhere + + Example:: + + >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([True, False, True, False, False]) + """ + ... +@overload +def isin(elements: Tensor, test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor + + Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns + a boolean tensor of the same shape as :attr:`elements` that is True for elements + in :attr:`test_elements` and False otherwise. + + .. note:: + One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both. + + Args: + elements (Tensor or Scalar): Input elements + test_elements (Tensor or Scalar): Values against which to test for each input element + assume_unique (bool, optional): If True, assumes both :attr:`elements` and + :attr:`test_elements` contain unique elements, which can speed up the + calculation. Default: False + invert (bool, optional): If True, inverts the boolean return tensor, resulting in True + values for elements *not* in :attr:`test_elements`. Default: False + + Returns: + A boolean tensor of the same shape as :attr:`elements` that is True for elements in + :attr:`test_elements` and False otherwise + + Example: + >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3])) + tensor([[False, True], + [ True, False]]) + """ + ... +@overload +def isin(element: Union[Number, _complex], test_elements: Tensor, *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor + + Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns + a boolean tensor of the same shape as :attr:`elements` that is True for elements + in :attr:`test_elements` and False otherwise. + + .. note:: + One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both. + + Args: + elements (Tensor or Scalar): Input elements + test_elements (Tensor or Scalar): Values against which to test for each input element + assume_unique (bool, optional): If True, assumes both :attr:`elements` and + :attr:`test_elements` contain unique elements, which can speed up the + calculation. Default: False + invert (bool, optional): If True, inverts the boolean return tensor, resulting in True + values for elements *not* in :attr:`test_elements`. 
Default: False + + Returns: + A boolean tensor of the same shape as :attr:`elements` that is True for elements in + :attr:`test_elements` and False otherwise + + Example: + >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3])) + tensor([[False, True], + [ True, False]]) + """ + ... +@overload +def isin(elements: Tensor, test_element: Union[Number, _complex], *, assume_unique: _bool = False, invert: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor + + Tests if each element of :attr:`elements` is in :attr:`test_elements`. Returns + a boolean tensor of the same shape as :attr:`elements` that is True for elements + in :attr:`test_elements` and False otherwise. + + .. note:: + One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both. + + Args: + elements (Tensor or Scalar): Input elements + test_elements (Tensor or Scalar): Values against which to test for each input element + assume_unique (bool, optional): If True, assumes both :attr:`elements` and + :attr:`test_elements` contain unique elements, which can speed up the + calculation. Default: False + invert (bool, optional): If True, inverts the boolean return tensor, resulting in True + values for elements *not* in :attr:`test_elements`. Default: False + + Returns: + A boolean tensor of the same shape as :attr:`elements` that is True for elements in + :attr:`test_elements` and False otherwise + + Example: + >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3])) + tensor([[False, True], + [ True, False]]) + """ + ... +def isinf(input: Tensor) -> Tensor: + r""" + isinf(input) -> Tensor + + Tests if each element of :attr:`input` is infinite + (positive or negative infinity) or not. + + .. note:: + Complex values are infinite when their real or imaginary part is + infinite. + + Args: + input (Tensor): the input tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is infinite and False elsewhere + + Example:: + + >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([False, True, False, True, False]) + """ + ... +def isnan(input: Tensor) -> Tensor: + r""" + isnan(input) -> Tensor + + Returns a new tensor with boolean elements representing if each element of :attr:`input` + is NaN or not. Complex values are considered NaN when either their real + and/or imaginary part is NaN. + + Arguments: + input (Tensor): the input tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is NaN and False elsewhere + + Example:: + + >>> torch.isnan(torch.tensor([1, float('nan'), 2])) + tensor([False, True, False]) + """ + ... +def isneginf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + isneginf(input, *, out=None) -> Tensor + Tests if each element of :attr:`input` is negative infinity or not. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) + >>> torch.isneginf(a) + tensor([ True, False, False]) + """ + ... +def isposinf(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + isposinf(input, *, out=None) -> Tensor + Tests if each element of :attr:`input` is positive infinity or not. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) + >>> torch.isposinf(a) + tensor([False, True, False]) + """ + ... +def isreal(input: Tensor) -> Tensor: + r""" + isreal(input) -> Tensor + + Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not. + All real-valued types are considered real. Complex values are considered real when their imaginary part is 0. + + Arguments: + input (Tensor): the input tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is real and False elsewhere + + Example:: + + >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j])) + tensor([True, False, True]) + """ + ... +def istft(input: Tensor, n_fft: _int, hop_length: Optional[_int] = None, win_length: Optional[_int] = None, window: Optional[Tensor] = None, center: _bool = True, normalized: _bool = False, onesided: Optional[_bool] = None, length: Optional[_int] = None, return_complex: _bool = False) -> Tensor: ... +@overload +def kaiser_window(window_length: _int, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`. + + Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and + ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True, + where ``L`` is the :attr:`window_length`. This function computes: + + .. math:: + out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta ) + + Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling + ``torch.kaiser_window(L + 1, B, periodic=False)[:-1])``. + The :attr:`periodic` argument is intended as a helpful shorthand + to produce a periodic window as input to functions like :func:`torch.stft`. + + .. note:: + If :attr:`window_length` is one, then the returned window is a single element tensor containing a one. + + + Args: + window_length (int): length of the window. + periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis. + If False, returns a symmetric window suitable for use in filter design. + beta (float, optional): shape parameter for the window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + """ + ... 
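The ``kaiser_window`` overloads carry no ``Example::`` section; the short doctest below is an illustrative sketch (not part of the stub) checking the periodic/symmetric identity stated in the docstring, using the default ``beta=12.0``::

    >>> w = torch.kaiser_window(8)                      # periodic window of length 8
    >>> w.shape
    torch.Size([8])
    >>> sym = torch.kaiser_window(9, periodic=False)    # symmetric window, one sample longer
    >>> torch.allclose(w, sym[:-1])
    True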
+@overload
+def kaiser_window(window_length: _int, periodic: _bool, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
+    r"""
+    kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+    Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
+
+    Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
+    ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
+    where ``L`` is the :attr:`window_length`. This function computes:
+
+    .. math::
+        out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
+
+    Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
+    ``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
+    The :attr:`periodic` argument is intended as a helpful shorthand
+    to produce a periodic window as input to functions like :func:`torch.stft`.
+
+    .. note::
+        If :attr:`window_length` is one, then the returned window is a single element tensor containing a one.
+
+
+    Args:
+        window_length (int): length of the window.
+        periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis.
+            If False, returns a symmetric window suitable for use in filter design.
+        beta (float, optional): shape parameter for the window.
+
+    Keyword args:
+        dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
+            Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`).
+        layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only
+            ``torch.strided`` (dense layout) is supported.
+        device (:class:`torch.device`, optional): the desired device of returned tensor.
+            Default: if ``None``, uses the current device for the default tensor type
+            (see :func:`torch.set_default_device`). :attr:`device` will be the CPU
+            for CPU tensor types and the current CUDA device for CUDA tensor types.
+        requires_grad (bool, optional): If autograd should record operations on the
+            returned tensor. Default: ``False``.
+    """
+    ...
+@overload
+def kaiser_window(window_length: _int, periodic: _bool, beta: _float, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor:
+    r"""
+    kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor
+
+    Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`.
+
+    Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and
+    ``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True,
+    where ``L`` is the :attr:`window_length`. This function computes:
+
+    .. math::
+        out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta )
+
+    Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling
+    ``torch.kaiser_window(L + 1, B, periodic=False)[:-1]``.
+ The :attr:`periodic` argument is intended as a helpful shorthand + to produce a periodic window as input to functions like :func:`torch.stft`. + + .. note:: + If :attr:`window_length` is one, then the returned window is a single element tensor containing a one. + + + Args: + window_length (int): length of the window. + periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis. + If False, returns a symmetric window suitable for use in filter design. + beta (float, optional): shape parameter for the window. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + """ + ... +def kl_div(input: Tensor, target: Tensor, reduction: _int = 1, *, log_target: _bool = False) -> Tensor: ... +def kron(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + kron(input, other, *, out=None) -> Tensor + + Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`. + + If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a + :math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a + :math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries: + + .. math:: + (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} = + \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n}, + + where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`. + If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions. + + Supports real-valued and complex-valued inputs. + + .. note:: + This function generalizes the typical definition of the Kronecker product for two matrices to two tensors, + as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a + :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix: + + .. math:: + \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix} + a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\ + \vdots & \ddots & \vdots \\ + a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix} + + where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`. + + Arguments: + input (Tensor) + other (Tensor) + + Keyword args: + out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None`` + + Examples:: + + >>> mat1 = torch.eye(2) + >>> mat2 = torch.ones(2, 2) + >>> torch.kron(mat1, mat2) + tensor([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) + + >>> mat1 = torch.eye(2) + >>> mat2 = torch.arange(1, 5).reshape(2, 2) + >>> torch.kron(mat1, mat2) + tensor([[1., 2., 0., 0.], + [3., 4., 0., 0.], + [0., 0., 1., 2.], + [0., 0., 3., 4.]]) + """ + ... 
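As a further illustration of the block structure described in the ``kron`` note above (a sketch, not part of the stub), an integer example makes the :math:`a_{ij} \mathbf{B}` blocks easy to read off::

    >>> a = torch.tensor([[1, 2], [3, 4]])
    >>> b = torch.tensor([[0, 1], [1, 0]])
    >>> torch.kron(a, b)   # 2x2 blocks: a[i, j] * b
    tensor([[0, 1, 0, 2],
            [1, 0, 2, 0],
            [0, 3, 0, 4],
            [3, 0, 4, 0]])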
+@overload +def kthvalue(input: Tensor, k: _int, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue: + r""" + kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor) + + Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th + smallest element of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each element found. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors + are the same size as :attr:`input`, except in the dimension :attr:`dim` where + they are of size 1. Otherwise, :attr:`dim` is squeezed + (see :func:`torch.squeeze`), resulting in both the :attr:`values` and + :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor. + + .. note:: + When :attr:`input` is a CUDA tensor and there are multiple valid + :attr:`k` th values, this function may nondeterministically return + :attr:`indices` for any of them. + + Args: + input (Tensor): the input tensor. + k (int): k for the k-th smallest element + dim (int, optional): the dimension to find the kth value along + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) + can be optionally given to be used as output buffers + + Example:: + + >>> x = torch.arange(1., 6.) + >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.kthvalue(x, 4) + torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3)) + + >>> x=torch.arange(1.,7.).resize_(2,3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.]]) + >>> torch.kthvalue(x, 2, 0, True) + torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]])) + """ + ... +@overload +def kthvalue(input: Tensor, k: _int, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.kthvalue: + r""" + kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor) + + Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th + smallest element of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each element found. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors + are the same size as :attr:`input`, except in the dimension :attr:`dim` where + they are of size 1. Otherwise, :attr:`dim` is squeezed + (see :func:`torch.squeeze`), resulting in both the :attr:`values` and + :attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor. + + .. note:: + When :attr:`input` is a CUDA tensor and there are multiple valid + :attr:`k` th values, this function may nondeterministically return + :attr:`indices` for any of them. + + Args: + input (Tensor): the input tensor. + k (int): k for the k-th smallest element + dim (int, optional): the dimension to find the kth value along + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) + can be optionally given to be used as output buffers + + Example:: + + >>> x = torch.arange(1., 6.) 
+ >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.kthvalue(x, 4) + torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3)) + + >>> x=torch.arange(1.,7.).resize_(2,3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.]]) + >>> torch.kthvalue(x, 2, 0, True) + torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]])) + """ + ... +def layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: _float = 1e-05, cudnn_enable: _bool = True) -> Tensor: ... +def lcm(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lcm(input, other, *, out=None) -> Tensor + + Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`. + + Both :attr:`input` and :attr:`other` must have integer types. + + .. note:: + This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([5, 10, 15]) + >>> b = torch.tensor([3, 4, 5]) + >>> torch.lcm(a, b) + tensor([15, 20, 15]) + >>> c = torch.tensor([3]) + >>> torch.lcm(a, c) + tensor([15, 30, 15]) + """ + ... +def lcm_(input: Tensor, other: Tensor) -> Tensor: ... +def ldexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ldexp(input, other, *, out=None) -> Tensor + + Multiplies :attr:`input` by 2 ** :attr:`other`. + + .. math:: + \text{{out}}_i = \text{{input}}_i * 2^\text{{other}}_i + + + Typically this function is used to construct floating point numbers by multiplying + mantissas in :attr:`input` with integral powers of two created from the exponents + in :attr:`other`. + + Args: + input (Tensor): the input tensor. + other (Tensor): a tensor of exponents, typically integers. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1])) + tensor([2.]) + >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])) + tensor([ 2., 4., 8., 16.]) + """ + ... +def ldexp_(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def le(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + le(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \leq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or Scalar): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is less than or equal to + :attr:`other` and False elsewhere + + Example:: + + >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, False], [True, True]]) + """ + ... +@overload +def le(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + le(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \leq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or Scalar): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Returns: + A boolean tensor that is True where :attr:`input` is less than or equal to + :attr:`other` and False elsewhere + + Example:: + + >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, False], [True, True]]) + """ + ... +@overload +def lerp(input: Tensor, end: Tensor, weight: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lerp(input, end, weight, *, out=None) + + Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based + on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor. + + .. math:: + \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i) + + The shapes of :attr:`start` and :attr:`end` must be + :ref:`broadcastable `. If :attr:`weight` is a tensor, then + the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable `. + + Args: + input (Tensor): the tensor with the starting points + end (Tensor): the tensor with the ending points + weight (float or tensor): the weight for the interpolation formula + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> start = torch.arange(1., 5.) + >>> end = torch.empty(4).fill_(10) + >>> start + tensor([ 1., 2., 3., 4.]) + >>> end + tensor([ 10., 10., 10., 10.]) + >>> torch.lerp(start, end, 0.5) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + >>> torch.lerp(start, end, torch.full_like(start, 0.5)) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + """ + ... +@overload +def lerp(input: Tensor, end: Tensor, weight: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + lerp(input, end, weight, *, out=None) + + Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based + on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor. + + .. math:: + \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i) + + The shapes of :attr:`start` and :attr:`end` must be + :ref:`broadcastable `. If :attr:`weight` is a tensor, then + the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable `. + + Args: + input (Tensor): the tensor with the starting points + end (Tensor): the tensor with the ending points + weight (float or tensor): the weight for the interpolation formula + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> start = torch.arange(1., 5.) + >>> end = torch.empty(4).fill_(10) + >>> start + tensor([ 1., 2., 3., 4.]) + >>> end + tensor([ 10., 10., 10., 10.]) + >>> torch.lerp(start, end, 0.5) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + >>> torch.lerp(start, end, torch.full_like(start, 0.5)) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + """ + ... +@overload +def less(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + less(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.lt`. + """ + ... +@overload +def less(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + less(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.lt`. + """ + ... +@overload +def less_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + less_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.le`. + """ + ... 
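+# Minimal usage sketch for the interpolation and comparison operators above
+# (values shown follow from the documented semantics):
+#
+#     >>> start = torch.zeros(3)
+#     >>> end = torch.tensor([1., 2., 3.])
+#     >>> torch.lerp(start, end, 0.25)          # start + 0.25 * (end - start)
+#     tensor([0.2500, 0.5000, 0.7500])
+#     >>> torch.le(end, 2)                      # less_equal() gives the same result
+#     tensor([ True,  True, False])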
+@overload +def less_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + less_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.le`. + """ + ... +def lgamma(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lgamma(input, *, out=None) -> Tensor + + Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. + + .. math:: + \text{out}_{i} = \ln |\Gamma(\text{input}_{i})| + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.arange(0.5, 2, 0.5) + >>> torch.lgamma(a) + tensor([ 0.5724, 0.0000, -0.1208]) + """ + ... +@overload +def linspace(start: Number, end: Number, steps: Optional[_int] = None, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... 
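+# Minimal sketch of the spacing formula documented above, assuming steps > 1:
+# element i equals start + i * (end - start) / (steps - 1).
+#
+#     >>> start, end, steps = 3.0, 10.0, 5
+#     >>> manual = torch.tensor([start + i * (end - start) / (steps - 1) for i in range(steps)])
+#     >>> torch.allclose(manual, torch.linspace(start, end, steps))
+#     True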
+@overload +def linspace(start: Tensor, end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... +@overload +def linspace(start: Union[Number, _complex], end: Tensor, steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. 
If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... +@overload +def linspace(start: Tensor, end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... +@overload +def linspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + + .. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) + + + From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + + Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) + """ + ... +def log(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + log(input, *, out=None) -> Tensor + + Returns a new tensor with the natural logarithm of the elements + of :attr:`input`. + + .. math:: + y_{i} = \log_{e} (x_{i}) + + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(5) * 5 + >>> a + tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739]) + >>> torch.log(a) + tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204]) + """ + ... +def log10(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + log10(input, *, out=None) -> Tensor + + Returns a new tensor with the logarithm to the base 10 of the elements + of :attr:`input`. + + .. 
math:: + y_{i} = \log_{10} (x_{i}) + + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251]) + + + >>> torch.log10(a) + tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476]) + """ + ... +def log10_(input: Tensor) -> Tensor: ... +def log1p(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + log1p(input, *, out=None) -> Tensor + + Returns a new tensor with the natural logarithm of (1 + :attr:`input`). + + .. math:: + y_i = \log_{e} (x_i + 1) + + .. note:: This function is more accurate than :func:`torch.log` for small + values of :attr:`input` + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492]) + >>> torch.log1p(a) + tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225]) + """ + ... +def log1p_(input: Tensor) -> Tensor: ... +def log2(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + log2(input, *, out=None) -> Tensor + + Returns a new tensor with the logarithm to the base 2 of the elements + of :attr:`input`. + + .. math:: + y_{i} = \log_{2} (x_{i}) + + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490]) + + + >>> torch.log2(a) + tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504]) + """ + ... +def log2_(input: Tensor) -> Tensor: ... +def log_(input: Tensor) -> Tensor: ... +@overload +def log_softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def log_softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: ... +def logaddexp(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logaddexp(input, other, *, out=None) -> Tensor + + Logarithm of the sum of exponentiations of the inputs. + + Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful + in statistics where the calculated probabilities of events may be so small as to + exceed the range of normal floating point numbers. In such cases the logarithm + of the calculated probability is stored. This function allows adding + probabilities stored in such a fashion. + + This op should be disambiguated with :func:`torch.logsumexp` which performs a + reduction on a single tensor. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3])) + tensor([-0.3069, -0.6867, -0.8731]) + >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3])) + tensor([-1., -2., -3.]) + >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3])) + tensor([1.1269e+00, 2.0000e+03, 3.0000e+04]) + """ + ... +def logaddexp2(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logaddexp2(input, other, *, out=None) -> Tensor + + Logarithm of the sum of exponentiations of the inputs in base-2. + + Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See + :func:`torch.logaddexp` for more details. + + Args: + input (Tensor): the input tensor. 
+ other (Tensor): the second input tensor + + Keyword arguments: + out (Tensor, optional): the output tensor. + """ + ... +@overload +def logcumsumexp(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logcumsumexp(input, dim, *, out=None) -> Tensor + Returns the logarithm of the cumulative summation of the exponentiation of + elements of :attr:`input` in the dimension :attr:`dim`. + + For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij}) + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(10) + >>> torch.logcumsumexp(a, dim=0) + tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811, + 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])) + """ + ... +@overload +def logcumsumexp(input: Tensor, dim: Union[str, ellipsis, None], *, out: Optional[Tensor] = None) -> Tensor: + r""" + logcumsumexp(input, dim, *, out=None) -> Tensor + Returns the logarithm of the cumulative summation of the exponentiation of + elements of :attr:`input` in the dimension :attr:`dim`. + + For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{logcumsumexp}(x)_{ij} = \log \sum\limits_{j=0}^{i} \exp(x_{ij}) + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to do the operation over + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(10) + >>> torch.logcumsumexp(a, dim=0) + tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811, + 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])) + """ + ... +def logdet(input: Tensor) -> Tensor: + r""" + logdet(input) -> Tensor + + Calculates log determinant of a square matrix or batches of square matrices. + + It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has + a negative determinant. + + .. note:: + Backward through :meth:`logdet` internally uses SVD results when :attr:`input` + is not invertible. In this case, double backward through :meth:`logdet` will + be unstable in when :attr:`input` doesn't have distinct singular values. See + :func:`torch.linalg.svd` for details. + + .. seealso:: + + :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the + absolute value of the determinant of real-valued (resp. complex) square matrices. + + Arguments: + input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more + batch dimensions. + + Example:: + + >>> A = torch.randn(3, 3) + >>> torch.det(A) + tensor(0.2611) + >>> torch.logdet(A) + tensor(-1.3430) + >>> A + tensor([[[ 0.9254, -0.6213], + [-0.5787, 1.6843]], + + [[ 0.3242, -0.9665], + [ 0.4539, -0.0887]], + + [[ 1.1336, -0.4025], + [-0.7089, 0.9032]]]) + >>> A.det() + tensor([1.1990, 0.4099, 0.7386]) + >>> A.det().log() + tensor([ 0.1815, -0.8917, -0.3031]) + """ + ... +def logical_and(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logical_and(input, other, *, out=None) -> Tensor + + Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are + treated as ``True``. + + Args: + input (Tensor): the input tensor. 
+ other (Tensor): the tensor to compute AND with + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([ True, False, False]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_and(a, b) + tensor([False, False, True, False]) + >>> torch.logical_and(a.double(), b.double()) + tensor([False, False, True, False]) + >>> torch.logical_and(a.double(), b) + tensor([False, False, True, False]) + >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([False, False, True, False]) + """ + ... +def logical_not(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logical_not(input, *, out=None) -> Tensor + + Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool + dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.logical_not(torch.tensor([True, False])) + tensor([False, True]) + >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)) + tensor([ True, False, False]) + >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double)) + tensor([ True, False, False]) + >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16)) + tensor([1, 0, 0], dtype=torch.int16) + """ + ... +def logical_or(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logical_or(input, other, *, out=None) -> Tensor + + Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are + treated as ``True``. + + Args: + input (Tensor): the input tensor. + other (Tensor): the tensor to compute OR with + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([ True, False, True]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_or(a, b) + tensor([ True, True, True, False]) + >>> torch.logical_or(a.double(), b.double()) + tensor([ True, True, True, False]) + >>> torch.logical_or(a.double(), b) + tensor([ True, True, True, False]) + >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([ True, True, True, False]) + """ + ... +def logical_xor(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logical_xor(input, other, *, out=None) -> Tensor + + Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are + treated as ``True``. + + Args: + input (Tensor): the input tensor. + other (Tensor): the tensor to compute XOR with + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([False, False, True]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_xor(a, b) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a.double(), b.double()) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a.double(), b) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([ True, True, False, False]) + """ + ... +def logit(input: Tensor, eps: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logit(input, eps=None, *, out=None) -> Tensor + + Alias for :func:`torch.special.logit`. + """ + ... +def logit_(input: Tensor, eps: Optional[_float] = None) -> Tensor: ... +@overload +def logspace(start: Number, end: Number, steps: Optional[_int] = None, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. 
+ + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... +@overload +def logspace(start: Tensor, end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... 
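+# Minimal sketch of the relationship implied by the formula above: logspace
+# raises base to an evenly spaced grid of exponents, so it should match
+# base ** linspace(...) up to floating-point error.
+#
+#     >>> a = torch.logspace(start=0.1, end=1.0, steps=5)
+#     >>> b = 10.0 ** torch.linspace(0.1, 1.0, steps=5)
+#     >>> torch.allclose(a, b)
+#     True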
+@overload +def logspace(start: Union[Number, _complex], end: Tensor, steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... +@overload +def logspace(start: Tensor, end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. 
math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... +@overload +def logspace(start: Union[Number, _complex], end: Union[Number, _complex], steps: _int, base: _float = 10.0, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + logspace(start, end, steps, base=10.0, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + + Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly + spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to + :math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale + with base :attr:`base`. That is, the values are: + + .. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) + + + + From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + + Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. 
+ + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) + """ + ... +@overload +def logsumexp(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logsumexp(input, dim, keepdim=False, *, out=None) + + Returns the log of summed exponentials of each row of the :attr:`input` + tensor in the given dimension :attr:`dim`. The computation is numerically + stabilized. + + For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij}) + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(3, 3) + >>> torch.logsumexp(a, 1) + tensor([1.4907, 1.0593, 1.5696]) + >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1))) + tensor(1.6859e-07) + """ + ... +@overload +def logsumexp(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + logsumexp(input, dim, keepdim=False, *, out=None) + + Returns the log of summed exponentials of each row of the :attr:`input` + tensor in the given dimension :attr:`dim`. The computation is numerically + stabilized. + + For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij}) + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. 
+ + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(3, 3) + >>> torch.logsumexp(a, 1) + tensor([1.4907, 1.0593, 1.5696]) + >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1))) + tensor(1.6859e-07) + """ + ... +@overload +def lstm(data: Tensor, batch_sizes: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor, Tensor]: ... +@overload +def lstm(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor, Tensor]: ... +def lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ... +@overload +def lt(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lt(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} < \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere + + Example:: + + >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, False], [True, False]]) + """ + ... +@overload +def lt(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + lt(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} < \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere + + Example:: + + >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, False], [True, False]]) + """ + ... +def lu_solve(input: Tensor, LU_data: Tensor, LU_pivots: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor + + Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted + LU factorization of A from :func:`~linalg.lu_factor`. + + This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`. + + .. warning:: + + :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`. + :func:`torch.lu_solve` will be removed in a future PyTorch release. + ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with + + .. 
code:: python + + X = linalg.lu_solve(LU, pivots, B) + + Arguments: + b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*` + is zero or more batch dimensions. + LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`, + where :math:`*` is zero or more batch dimensions. + LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`, + where :math:`*` is zero or more batch dimensions. + The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of + :attr:`LU_data`. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> A = torch.randn(2, 3, 3) + >>> b = torch.randn(2, 3, 1) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> x = torch.lu_solve(b, LU, pivots) + >>> torch.dist(A @ x, b) + tensor(1.00000e-07 * + 2.8312) + """ + ... +def lu_unpack(LU_data: Tensor, LU_pivots: Tensor, unpack_data: _bool = True, unpack_pivots: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.lu_unpack: + r""" + lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor) + + Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices. + + .. seealso:: + + :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient + than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`. + + Args: + LU_data (Tensor): the packed LU factorization data + LU_pivots (Tensor): the packed LU factorization pivots + unpack_data (bool): flag indicating if the data should be unpacked. + If ``False``, then the returned ``L`` and ``U`` are empty tensors. + Default: ``True`` + unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``. + If ``False``, then the returned ``P`` is an empty tensor. + Default: ``True`` + + Keyword args: + out (tuple, optional): output tuple of three tensors. Ignored if `None`. + + Returns: + A namedtuple ``(P, L, U)`` + + Examples:: + + >>> A = torch.randn(2, 3, 3) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> P, L, U = torch.lu_unpack(LU, pivots) + >>> # We can recover A from the factorization + >>> A_ = P @ L @ U + >>> torch.allclose(A, A_) + True + + >>> # LU factorization of a rectangular matrix: + >>> A = torch.randn(2, 3, 2) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> P, L, U = torch.lu_unpack(LU, pivots) + >>> # P, L, U are the same as returned by linalg.lu + >>> P_, L_, U_ = torch.linalg.lu(A) + >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_) + True + """ + ... +def margin_ranking_loss(input1: Tensor, input2: Tensor, target: Tensor, margin: _float = 0.0, reduction: _int = 1) -> Tensor: ... +@overload +def masked_fill(input: Tensor, mask: Tensor, value: Tensor) -> Tensor: ... +@overload +def masked_fill(input: Tensor, mask: Tensor, value: Union[Number, _complex]) -> Tensor: ... +def masked_scatter(input: Tensor, mask: Tensor, source: Tensor) -> Tensor: ... +def masked_select(input: Tensor, mask: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + masked_select(input, mask, *, out=None) -> Tensor + + Returns a new 1-D tensor which indexes the :attr:`input` tensor according to + the boolean mask :attr:`mask` which is a `BoolTensor`. 
+ + The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need + to match, but they must be :ref:`broadcastable `. + + .. note:: The returned tensor does **not** use the same storage + as the original tensor + + Args: + input (Tensor): the input tensor. + mask (BoolTensor): the tensor containing the binary mask to index with + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.3552, -2.3825, -0.8297, 0.3477], + [-1.2035, 1.2252, 0.5002, 0.6248], + [ 0.1307, -2.0608, 0.1244, 2.0139]]) + >>> mask = x.ge(0.5) + >>> mask + tensor([[False, False, False, False], + [False, True, True, True], + [False, False, False, True]]) + >>> torch.masked_select(x, mask) + tensor([ 1.2252, 0.5002, 0.6248, 2.0139]) + """ + ... +def matmul(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + matmul(input, other, *, out=None) -> Tensor + + Matrix product of two tensors. + + The behavior depends on the dimensionality of the tensors as follows: + + - If both tensors are 1-dimensional, the dot product (scalar) is returned. + - If both arguments are 2-dimensional, the matrix-matrix product is returned. + - If the first argument is 1-dimensional and the second argument is 2-dimensional, + a 1 is prepended to its dimension for the purpose of the matrix multiply. + After the matrix multiply, the prepended dimension is removed. + - If the first argument is 2-dimensional and the second argument is 1-dimensional, + the matrix-vector product is returned. + - If both arguments are at least 1-dimensional and at least one argument is + N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first + argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the + batched matrix multiply and removed after. If the second argument is 1-dimensional, a + 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. + The non-matrix (i.e. batch) dimensions are :ref:`broadcasted ` (and thus + must be broadcastable). For example, if :attr:`input` is a + :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)` + tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor. + + Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs + are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a + :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)` + tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the + matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor. + + This operation has support for arguments with :ref:`sparse layouts`. In particular the + matrix-matrix (both arguments 2-dimensional) supports sparse arguments with the same restrictions + as :func:`torch.mm` + + + .. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request. + + This operator supports :ref:`TensorFloat32`. + + On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward. + + .. note:: + + The 1-dimensional dot product version of this function does not support an :attr:`out` parameter. 
+ + Arguments: + input (Tensor): the first tensor to be multiplied + other (Tensor): the second tensor to be multiplied + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> # vector x vector + >>> tensor1 = torch.randn(3) + >>> tensor2 = torch.randn(3) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([]) + >>> # matrix x vector + >>> tensor1 = torch.randn(3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([3]) + >>> # batched matrix x broadcasted vector + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3]) + >>> # batched matrix x batched matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(10, 4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + >>> # batched matrix x broadcasted matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + """ + ... +def matrix_exp(input: Tensor) -> Tensor: + r""" + matrix_exp(A) -> Tensor + + Alias for :func:`torch.linalg.matrix_exp`. + """ + ... +def matrix_power(input: Tensor, n: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + matrix_power(input, n, *, out=None) -> Tensor + + Alias for :func:`torch.linalg.matrix_power` + """ + ... +@overload +def max(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + max(input) -> Tensor + + Returns the maximum value of all elements in the ``input`` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + + .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each maximum value found + (argmax). + + If ``keepdim`` is ``True``, the output tensors are of the same size + as ``input`` except in the dimension ``dim`` where they are of size 1. + Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than ``input``. + + .. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``. + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + + .. function:: max(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.maximum`. + """ + ... +@overload +def max(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + max(input) -> Tensor + + Returns the maximum value of all elements in the ``input`` tensor. + + .. 
warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + + .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each maximum value found + (argmax). + + If ``keepdim`` is ``True``, the output tensors are of the same size + as ``input`` except in the dimension ``dim`` where they are of size 1. + Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than ``input``. + + .. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``. + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + + .. function:: max(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.maximum`. + """ + ... +@overload +def max(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max: + r""" + max(input) -> Tensor + + Returns the maximum value of all elements in the ``input`` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + + .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each maximum value found + (argmax). + + If ``keepdim`` is ``True``, the output tensors are of the same size + as ``input`` except in the dimension ``dim`` where they are of size 1. + Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than ``input``. + + .. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``. 
+ + Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + + .. function:: max(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.maximum`. + """ + ... +@overload +def max(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.max: + r""" + max(input) -> Tensor + + Returns the maximum value of all elements in the ``input`` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + + .. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each maximum value found + (argmax). + + If ``keepdim`` is ``True``, the output tensors are of the same size + as ``input`` except in the dimension ``dim`` where they are of size 1. + Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than ``input``. + + .. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. Default: ``False``. + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + + .. function:: max(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.maximum`. + """ + ... +def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def max_pool1d_with_indices(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tuple[Tensor, Tensor]: ... +def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... 
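+# A minimal usage sketch for torch.max_pool2d (declared above): stride
+# defaults to kernel_size when omitted, giving non-overlapping windows.
+# Values are random, so only the output shape is shown.
+#
+#     >>> x = torch.randn(1, 1, 4, 4)
+#     >>> torch.max_pool2d(x, kernel_size=2).shape
+#     torch.Size([1, 1, 2, 2])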
+def maximum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + maximum(input, other, *, out=None) -> Tensor + + Computes the element-wise maximum of :attr:`input` and :attr:`other`. + + .. note:: + If one of the elements being compared is a NaN, then that element is returned. + :func:`maximum` is not supported for tensors with complex dtypes. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2, -1)) + >>> b = torch.tensor((3, 0, 4)) + >>> torch.maximum(a, b) + tensor([3, 2, 4]) + """ + ... +@overload +def mean(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + mean(input, *, dtype=None) -> Tensor + + Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex. + + Args: + input (Tensor): + the input tensor, either of floating point or complex dtype + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.2294, -0.5481, 1.3288]]) + >>> torch.mean(a) + tensor(0.3367) + + .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor + :noindex: + + Returns the mean value of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.nanmean` computes the mean value of `non-NaN` elements. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.3841, 0.6320, 0.4254, -0.7384], + [-0.9644, 1.0131, -0.6549, -1.4279], + [-0.2951, -1.3350, -0.7694, 0.5600], + [ 1.0842, -0.9580, 0.3623, 0.2343]]) + >>> torch.mean(a, 1) + tensor([-0.0163, -0.5085, -0.4599, 0.1807]) + >>> torch.mean(a, 1, True) + tensor([[-0.0163], + [-0.5085], + [-0.4599], + [ 0.1807]]) + """ + ... +@overload +def mean(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + mean(input, *, dtype=None) -> Tensor + + Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex. + + Args: + input (Tensor): + the input tensor, either of floating point or complex dtype + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.2294, -0.5481, 1.3288]]) + >>> torch.mean(a) + tensor(0.3367) + + .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor + :noindex: + + Returns the mean value of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.nanmean` computes the mean value of `non-NaN` elements. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.3841, 0.6320, 0.4254, -0.7384], + [-0.9644, 1.0131, -0.6549, -1.4279], + [-0.2951, -1.3350, -0.7694, 0.5600], + [ 1.0842, -0.9580, 0.3623, 0.2343]]) + >>> torch.mean(a, 1) + tensor([-0.0163, -0.5085, -0.4599, 0.1807]) + >>> torch.mean(a, 1, True) + tensor([[-0.0163], + [-0.5085], + [-0.4599], + [ 0.1807]]) + """ + ... +@overload +def mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + mean(input, *, dtype=None) -> Tensor + + Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex. + + Args: + input (Tensor): + the input tensor, either of floating point or complex dtype + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.2294, -0.5481, 1.3288]]) + >>> torch.mean(a) + tensor(0.3367) + + .. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor + :noindex: + + Returns the mean value of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.nanmean` computes the mean value of `non-NaN` elements. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.3841, 0.6320, 0.4254, -0.7384], + [-0.9644, 1.0131, -0.6549, -1.4279], + [-0.2951, -1.3350, -0.7694, 0.5600], + [ 1.0842, -0.9580, 0.3623, 0.2343]]) + >>> torch.mean(a, 1) + tensor([-0.0163, -0.5085, -0.4599, 0.1807]) + >>> torch.mean(a, 1, True) + tensor([[-0.0163], + [-0.5085], + [-0.4599], + [ 0.1807]]) + """ + ... +@overload +def median(input: Tensor) -> Tensor: + r""" + median(input) -> Tensor + + Returns the median of the values in :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements. In this case the lower of the two medians is returned. To + compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``median(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 1.5219, -1.5212, 0.2202]]) + >>> torch.median(a) + tensor(0.2202) + + .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`. + + By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + + If :attr:`keepdim` is ``True``, the output tensors are of the same size + as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the outputs tensor having 1 fewer dimension than :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements in the dimension :attr:`dim`. In this case the lower of the + two medians is returned. To compute the mean of both medians in + :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + ``indices`` does not necessarily contain the first occurrence of each + median value found, unless it is unique. + The exact implementation details are device-specific. + Do not expect the same result when run on CPU and GPU in general. + For the same reason do not expect the gradients to be deterministic. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.randn(4, 5) + >>> a + tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131], + [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270], + [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488], + [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]]) + >>> torch.median(a, 1) + torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3])) + """ + ... 
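+# A small sketch of the "lower of the two medians" behaviour documented above,
+# contrasted with torch.quantile at q=0.5, which interpolates:
+#
+#     >>> t = torch.tensor([1., 2., 3., 4.])
+#     >>> torch.median(t)
+#     tensor(2.)
+#     >>> torch.quantile(t, 0.5)
+#     tensor(2.5000)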
+@overload +def median(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median: + r""" + median(input) -> Tensor + + Returns the median of the values in :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements. In this case the lower of the two medians is returned. To + compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``median(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 1.5219, -1.5212, 0.2202]]) + >>> torch.median(a) + tensor(0.2202) + + .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`. + + By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + + If :attr:`keepdim` is ``True``, the output tensors are of the same size + as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the outputs tensor having 1 fewer dimension than :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements in the dimension :attr:`dim`. In this case the lower of the + two medians is returned. To compute the mean of both medians in + :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + ``indices`` does not necessarily contain the first occurrence of each + median value found, unless it is unique. + The exact implementation details are device-specific. + Do not expect the same result when run on CPU and GPU in general. + For the same reason do not expect the gradients to be deterministic. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.randn(4, 5) + >>> a + tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131], + [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270], + [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488], + [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]]) + >>> torch.median(a, 1) + torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3])) + """ + ... +@overload +def median(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.median: + r""" + median(input) -> Tensor + + Returns the median of the values in :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements. In this case the lower of the two medians is returned. To + compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. 
warning:: + This function produces deterministic (sub)gradients unlike ``median(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 1.5219, -1.5212, 0.2202]]) + >>> torch.median(a) + tensor(0.2202) + + .. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`. + + By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + + If :attr:`keepdim` is ``True``, the output tensors are of the same size + as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the outputs tensor having 1 fewer dimension than :attr:`input`. + + .. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements in the dimension :attr:`dim`. In this case the lower of the + two medians is returned. To compute the mean of both medians in + :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead. + + .. warning:: + ``indices`` does not necessarily contain the first occurrence of each + median value found, unless it is unique. + The exact implementation details are device-specific. + Do not expect the same result when run on CPU and GPU in general. + For the same reason do not expect the gradients to be deterministic. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.randn(4, 5) + >>> a + tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131], + [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270], + [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488], + [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]]) + >>> torch.median(a, 1) + torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3])) + """ + ... +@overload +def min(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + min(input) -> Tensor + + Returns the minimum value of all elements in the :attr:`input` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + + .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each minimum value found + (argmin). + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensors having 1 fewer dimension than :attr:`input`. + + .. 
note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + + .. function:: min(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.minimum`. + """ + ... +@overload +def min(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + min(input) -> Tensor + + Returns the minimum value of all elements in the :attr:`input` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + + .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each minimum value found + (argmin). + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensors having 1 fewer dimension than :attr:`input`. + + .. note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + + .. function:: min(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.minimum`. + """ + ... +@overload +def min(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min: + r""" + min(input) -> Tensor + + Returns the minimum value of all elements in the :attr:`input` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + + .. 
function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each minimum value found + (argmin). + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensors having 1 fewer dimension than :attr:`input`. + + .. note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + + .. function:: min(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.minimum`. + """ + ... +@overload +def min(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.min: + r""" + min(input) -> Tensor + + Returns the minimum value of all elements in the :attr:`input` tensor. + + .. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + + .. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`. And ``indices`` is the index location of each minimum value found + (argmin). + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensors having 1 fewer dimension than :attr:`input`. + + .. note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + + .. function:: min(input, other, *, out=None) -> Tensor + :noindex: + + See :func:`torch.minimum`. + """ + ... 
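+# A minimal sketch of the dim-reducing form of torch.min/torch.max described
+# above: the returned namedtuple can be unpacked directly. Values are random,
+# so only the shapes are shown.
+#
+#     >>> values, indices = torch.min(torch.randn(4, 4), dim=1)
+#     >>> values.shape, indices.shape
+#     (torch.Size([4]), torch.Size([4]))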
+def minimum(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + minimum(input, other, *, out=None) -> Tensor + + Computes the element-wise minimum of :attr:`input` and :attr:`other`. + + .. note:: + If one of the elements being compared is a NaN, then that element is returned. + :func:`minimum` is not supported for tensors with complex dtypes. + + Args: + input (Tensor): the input tensor. + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2, -1)) + >>> b = torch.tensor((3, 0, 4)) + >>> torch.minimum(a, b) + tensor([1, 0, -1]) + """ + ... +def miopen_batch_norm(input: Tensor, weight: Tensor, bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, exponential_average_factor: _float, epsilon: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +def miopen_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ... +def miopen_convolution_add_relu(input: Tensor, weight: Tensor, z: Tensor, alpha: Optional[Union[Number, _complex]], bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def miopen_convolution_relu(input: Tensor, weight: Tensor, bias: Optional[Tensor], stride: Sequence[Union[_int, SymInt]], padding: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def miopen_convolution_transpose(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], output_padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ... +def miopen_depthwise_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt], benchmark: _bool, deterministic: _bool) -> Tensor: ... +def miopen_rnn(input: Tensor, weight: Union[Tuple[Tensor, ...], List[Tensor]], weight_stride0: _int, hx: Tensor, cx: Optional[Tensor], mode: _int, hidden_size: _int, num_layers: _int, batch_first: _bool, dropout: _float, train: _bool, bidirectional: _bool, batch_sizes: _size, dropout_state: Optional[Tensor]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: ... +def mkldnn_adaptive_avg_pool2d(input: Tensor, output_size: Union[_int, _size], *, out: Optional[Tensor] = None) -> Tensor: ... +def mkldnn_convolution(input: Tensor, weight: Tensor, bias: Optional[Tensor], padding: Sequence[Union[_int, SymInt]], stride: Sequence[Union[_int, SymInt]], dilation: Sequence[Union[_int, SymInt]], groups: Union[_int, SymInt]) -> Tensor: ... +def mkldnn_linear_backward_weights(grad_output: Tensor, input: Tensor, weight: Tensor, bias_defined: _bool) -> Tuple[Tensor, Tensor]: ... +def mkldnn_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... 
+def mkldnn_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ...
+def mkldnn_rnn_layer(input: Tensor, weight0: Tensor, weight1: Tensor, weight2: Tensor, weight3: Tensor, hx_: Tensor, cx_: Tensor, reverse: _bool, batch_sizes: _size, mode: _int, hidden_size: _int, num_layers: _int, has_biases: _bool, bidirectional: _bool, batch_first: _bool, train: _bool) -> Tuple[Tensor, Tensor, Tensor, Tensor]: ...
+def mm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor] = None) -> Tensor:
+ r"""
+ mm(input, mat2, *, out=None) -> Tensor
+
+ Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`.
+
+ If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a
+ :math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor.
+
+ .. note:: This function does not :ref:`broadcast `.
+ For broadcasting matrix products, see :func:`torch.matmul`.
+
+ Supports strided and sparse 2-D tensors as inputs, and autograd with
+ respect to strided inputs.
+
+ This operation has support for arguments with :ref:`sparse layouts`.
+ If :attr:`out` is provided, its layout will be used. Otherwise, the result
+ layout will be deduced from that of :attr:`input`.
+
+
+ .. warning::
+ Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported,
+ or may not have autograd support. If you notice missing functionality please
+ open a feature request.
+
+ This operator supports :ref:`TensorFloat32`.
+
+ On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision` for backward.
+
+ Args:
+ input (Tensor): the first matrix to be matrix multiplied
+ mat2 (Tensor): the second matrix to be matrix multiplied
+
+ Keyword args:
+ out (Tensor, optional): the output tensor.
+
+ Example::
+
+ >>> mat1 = torch.randn(2, 3)
+ >>> mat2 = torch.randn(3, 3)
+ >>> torch.mm(mat1, mat2)
+ tensor([[ 0.4851, 0.5037, -0.3633],
+ [-0.0760, -3.6705, 2.4784]])
+ """
+ ...
+@overload
+def mode(input: Tensor, dim: _int = -1, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode:
+ r"""
+ mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor)
+
+ Returns a namedtuple ``(values, indices)`` where ``values`` is the mode
+ value of each row of the :attr:`input` tensor in the given dimension
+ :attr:`dim`, i.e. a value which appears most often
+ in that row, and ``indices`` is the index location of each mode value found.
+
+ By default, :attr:`dim` is the last dimension of the :attr:`input` tensor.
+
+ If :attr:`keepdim` is ``True``, the output tensors are of the same size as
+ :attr:`input` except in the dimension :attr:`dim` where they are of size 1.
+ Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting
+ in the output tensors having 1 fewer dimension than :attr:`input`.
+
+ .. note:: This function is not defined for ``torch.cuda.Tensor`` yet.
+
+ Args:
+ input (Tensor): the input tensor.
+ dim (int): the dimension to reduce.
+ keepdim (bool): whether the output tensor has :attr:`dim` retained or not.
+ + Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + + Example:: + + >>> b = torch.tensor( + [[0, 0, 0, 2, 0, 0, 2], + [0, 3, 0, 0, 2, 0, 1], + [2, 2, 2, 0, 0, 0, 3], + [2, 2, 3, 0, 1, 1, 0], + [1, 1, 0, 0, 2, 0, 2]]) + >>> torch.mode(b, 0) + torch.return_types.mode( + values=tensor([0, 2, 0, 0, 0, 0, 2]), + indices=tensor([1, 3, 4, 4, 2, 4, 4])) + """ + ... +@overload +def mode(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.mode: + r""" + mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + + Returns a namedtuple ``(values, indices)`` where ``values`` is the mode + value of each row of the :attr:`input` tensor in the given dimension + :attr:`dim`, i.e. a value which appears most often + in that row, and ``indices`` is the index location of each mode value found. + + By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + + If :attr:`keepdim` is ``True``, the output tensors are of the same size as + :attr:`input` except in the dimension :attr:`dim` where they are of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting + in the output tensors having 1 fewer dimension than :attr:`input`. + + .. note:: This function is not defined for ``torch.cuda.Tensor`` yet. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + + Example:: + + >>> b = torch.tensor( + [[0, 0, 0, 2, 0, 0, 2], + [0, 3, 0, 0, 2, 0, 1], + [2, 2, 2, 0, 0, 0, 3], + [2, 2, 3, 0, 1, 1, 0], + [1, 1, 0, 0, 2, 0, 2]]) + >>> torch.mode(b, 0) + torch.return_types.mode( + values=tensor([0, 2, 0, 0, 0, 0, 2]), + indices=tensor([1, 3, 4, 4, 2, 4, 4])) + """ + ... +@overload +def moveaxis(input: Tensor, source: _int, destination: _int) -> Tensor: + r""" + moveaxis(input, source, destination) -> Tensor + + Alias for :func:`torch.movedim`. + + This function is equivalent to NumPy's moveaxis function. + + Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.moveaxis(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.moveaxis(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.moveaxis(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.moveaxis(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) + """ + ... +@overload +def moveaxis(input: Tensor, source: _size, destination: _size) -> Tensor: + r""" + moveaxis(input, source, destination) -> Tensor + + Alias for :func:`torch.movedim`. + + This function is equivalent to NumPy's moveaxis function. + + Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.moveaxis(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.moveaxis(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.moveaxis(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.moveaxis(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) + """ + ... 
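+# A short sketch of torch.movedim (for which torch.moveaxis above is an
+# alias): only the named dimension is repositioned, the remaining dimensions
+# keep their relative order. Values are random, so only the shape is shown.
+#
+#     >>> t = torch.randn(2, 3, 4, 5)
+#     >>> torch.movedim(t, 3, 1).shape
+#     torch.Size([2, 5, 3, 4])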
+@overload +def movedim(input: Tensor, source: _int, destination: _int) -> Tensor: + r""" + movedim(input, source, destination) -> Tensor + + Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source` + to the position(s) in :attr:`destination`. + + Other dimensions of :attr:`input` that are not explicitly moved remain in + their original order and appear at the positions not specified in :attr:`destination`. + + Args: + input (Tensor): the input tensor. + source (int or tuple of ints): Original positions of the dims to move. These must be unique. + destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique. + + Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.movedim(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.movedim(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.movedim(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.movedim(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) + """ + ... +@overload +def movedim(input: Tensor, source: _size, destination: _size) -> Tensor: + r""" + movedim(input, source, destination) -> Tensor + + Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source` + to the position(s) in :attr:`destination`. + + Other dimensions of :attr:`input` that are not explicitly moved remain in + their original order and appear at the positions not specified in :attr:`destination`. + + Args: + input (Tensor): the input tensor. + source (int or tuple of ints): Original positions of the dims to move. These must be unique. + destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique. + + Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.movedim(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.movedim(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.movedim(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.movedim(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) + """ + ... +def msort(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + msort(input, *, out=None) -> Tensor + + Sorts the elements of the :attr:`input` tensor along its first dimension + in ascending order by value. + + .. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`. + See also :func:`torch.sort`. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.randn(3, 4) + >>> t + tensor([[-0.1321, 0.4370, -1.2631, -1.1289], + [-2.0527, -1.1250, 0.2275, 0.3077], + [-0.0881, -0.1259, -0.5495, 1.0284]]) + >>> torch.msort(t) + tensor([[-2.0527, -1.1250, -1.2631, -1.1289], + [-0.1321, -0.1259, -0.5495, 0.3077], + [-0.0881, 0.4370, 0.2275, 1.0284]]) + """ + ... +def mul(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + mul(input, other, *, out=None) -> Tensor + + Multiplies :attr:`input` by :attr:`other`. + + + .. 
math:: + \text{out}_i = \text{input}_i \times \text{other}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number) - the tensor or number to multiply input by. + + Keyword args: + out (Tensor, optional): the output tensor. + + Examples:: + + >>> a = torch.randn(3) + >>> a + tensor([ 0.2015, -0.4255, 2.6087]) + >>> torch.mul(a, 100) + tensor([ 20.1494, -42.5491, 260.8663]) + + >>> b = torch.randn(4, 1) + >>> b + tensor([[ 1.1207], + [-0.3137], + [ 0.0700], + [ 0.8378]]) + >>> c = torch.randn(1, 4) + >>> c + tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]]) + >>> torch.mul(b, c) + tensor([[ 0.5767, 0.1363, -0.5877, 2.5083], + [-0.1614, -0.0382, 0.1645, -0.7021], + [ 0.0360, 0.0085, -0.0367, 0.1567], + [ 0.4312, 0.1019, -0.4394, 1.8753]]) + """ + ... +def multinomial(input: Tensor, num_samples: _int, replacement: _bool = False, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor + + Returns a tensor where each row contains :attr:`num_samples` indices sampled + from the multinomial (a stricter definition would be multivariate, + refer to torch.distributions.multinomial.Multinomial for more details) + probability distribution located in the corresponding row + of tensor :attr:`input`. + + .. note:: + The rows of :attr:`input` do not need to sum to one (in which case we use + the values as weights), but must be non-negative, finite and have + a non-zero sum. + + Indices are ordered from left to right according to when each was sampled + (first samples are placed in first column). + + If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`. + + If :attr:`input` is a matrix with `m` rows, :attr:`out` is an matrix of shape + :math:`(m \times \text{num\_samples})`. + + If replacement is ``True``, samples are drawn with replacement. + + If not, they are drawn without replacement, which means that when a + sample index is drawn for a row, it cannot be drawn again for that row. + + .. note:: + When drawn without replacement, :attr:`num_samples` must be lower than + number of non-zero elements in :attr:`input` (or the min number of non-zero + elements in each row of :attr:`input` if it is a matrix). + + Args: + input (Tensor): the input tensor containing probabilities + num_samples (int): number of samples to draw + replacement (bool, optional): whether to draw with replacement or not + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights + >>> torch.multinomial(weights, 2) + tensor([1, 2]) + >>> torch.multinomial(weights, 4) # ERROR! + RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False, + not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320 + >>> torch.multinomial(weights, 4, replacement=True) + tensor([ 2, 1, 1, 1]) + """ + ... +@overload +def multiply(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + multiply(input, other, *, out=None) + + Alias for :func:`torch.mul`. + """ + ... 
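+# A brief sketch for torch.multinomial documented above: samples are indices
+# into the weight vector, and without replacement num_samples cannot exceed
+# the number of non-zero weights. Draws are random, so only the shape is
+# shown.
+#
+#     >>> w = torch.tensor([0.1, 0.4, 0.3, 0.2])
+#     >>> torch.multinomial(w, 3).shape
+#     torch.Size([3])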
+@overload +def multiply(input: Tensor, other: Union[Number, _complex]) -> Tensor: + r""" + multiply(input, other, *, out=None) + + Alias for :func:`torch.mul`. + """ + ... +def mv(input: Tensor, vec: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + mv(input, vec, *, out=None) -> Tensor + + Performs a matrix-vector product of the matrix :attr:`input` and the vector + :attr:`vec`. + + If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of + size :math:`m`, :attr:`out` will be 1-D of size :math:`n`. + + .. note:: This function does not :ref:`broadcast `. + + Args: + input (Tensor): matrix to be multiplied + vec (Tensor): vector to be multiplied + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.mv(mat, vec) + tensor([ 1.0404, -0.6361]) + """ + ... +def mvlgamma(input: Tensor, p: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + mvlgamma(input, p, *, out=None) -> Tensor + + Alias for :func:`torch.special.multigammaln`. + """ + ... +def nan_to_num(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor + + Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input` + with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively. + By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the + greatest finite value representable by :attr:`input`'s dtype, and negative infinity + is replaced with the least finite value representable by :attr:`input`'s dtype. + + Args: + input (Tensor): the input tensor. + nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero. + posinf (Number, optional): if a Number, the value to replace positive infinity values with. + If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype. + Default is None. + neginf (Number, optional): if a Number, the value to replace negative infinity values with. + If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype. + Default is None. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14]) + >>> torch.nan_to_num(x) + tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) + >>> torch.nan_to_num(x, nan=2.0) + tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) + >>> torch.nan_to_num(x, nan=2.0, posinf=1.0) + tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00]) + """ + ... +def nan_to_num_(input: Tensor, nan: Optional[_float] = None, posinf: Optional[_float] = None, neginf: Optional[_float] = None) -> Tensor: ... +def nanmean(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor + + Computes the mean of all `non-NaN` elements along the specified dimensions. + + This function is identical to :func:`torch.mean` when there are no `NaN` values + in the :attr:`input` tensor. 
In the presence of `NaN`, :func:`torch.mean` will + propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the + `NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`). + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + out (Tensor, optional): the output tensor. + + .. seealso:: + + :func:`torch.mean` computes the mean value, propagating `NaN`. + + Example:: + + >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]]) + >>> x.mean() + tensor(nan) + >>> x.nanmean() + tensor(1.8000) + >>> x.mean(dim=0) + tensor([ nan, 1.5000, 2.5000]) + >>> x.nanmean(dim=0) + tensor([1.0000, 1.5000, 2.5000]) + + # If all elements in the reduced dimensions are NaN then the result is NaN + >>> torch.tensor([torch.nan]).nanmean() + tensor(nan) + """ + ... +@overload +def nanmedian(input: Tensor) -> Tensor: + r""" + nanmedian(input) -> Tensor + + Returns the median of the values in :attr:`input`, ignoring ``NaN`` values. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`. + When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``, + while this function will return the median of the non-``NaN`` elements in :attr:`input`. + If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.tensor([1, float('nan'), 3, 2]) + >>> a.median() + tensor(nan) + >>> a.nanmedian() + tensor(2.) + + .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values + found in the dimension :attr:`dim`. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has + one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the + median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. 
+ + Example:: + + >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]]) + >>> a + tensor([[2., 3., 1.], + [nan, 1., nan]]) + >>> a.median(0) + torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1])) + >>> a.nanmedian(0) + torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0])) + """ + ... +@overload +def nanmedian(input: Tensor, dim: _int, keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian: + r""" + nanmedian(input) -> Tensor + + Returns the median of the values in :attr:`input`, ignoring ``NaN`` values. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`. + When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``, + while this function will return the median of the non-``NaN`` elements in :attr:`input`. + If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.tensor([1, float('nan'), 3, 2]) + >>> a.median() + tensor(nan) + >>> a.nanmedian() + tensor(2.) + + .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values + found in the dimension :attr:`dim`. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has + one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the + median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]]) + >>> a + tensor([[2., 3., 1.], + [nan, 1., nan]]) + >>> a.median(0) + torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1])) + >>> a.nanmedian(0) + torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0])) + """ + ... +@overload +def nanmedian(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.nanmedian: + r""" + nanmedian(input) -> Tensor + + Returns the median of the values in :attr:`input`, ignoring ``NaN`` values. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`. + When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``, + while this function will return the median of the non-``NaN`` elements in :attr:`input`. + If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``. + + Args: + input (Tensor): the input tensor. 
+ + Example:: + + >>> a = torch.tensor([1, float('nan'), 3, 2]) + >>> a.median() + tensor(nan) + >>> a.nanmedian() + tensor(2.) + + .. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + + Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` + in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values + found in the dimension :attr:`dim`. + + This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has + one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the + median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + + Example:: + + >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]]) + >>> a + tensor([[2., 3., 1.], + [nan, 1., nan]]) + >>> a.median(0) + torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1])) + >>> a.nanmedian(0) + torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0])) + """ + ... +@overload +def nanquantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: + r""" + nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + + This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values, + computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did + not exist. If all values in a reduced row are ``NaN`` then the quantiles for + that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`. + + Args: + input (Tensor): the input tensor. + q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1] + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.tensor([float('nan'), 1, 2]) + >>> t.quantile(0.5) + tensor(nan) + >>> t.nanquantile(0.5) + tensor(1.5000) + >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]]) + >>> t + tensor([[nan, nan], + [1., 2.]]) + >>> t.nanquantile(0.5, dim=0) + tensor([1., 2.]) + >>> t.nanquantile(0.5, dim=1) + tensor([ nan, 1.5000]) + """ + ... 
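The NaN-ignoring reductions documented above (``nanmean``, ``nanmedian``, ``nanquantile``) all follow the same pattern: they behave like their plain counterparts except that ``NaN`` entries are dropped from each reduced row. A minimal standalone sketch (illustrative only, not part of the generated stub file; the tensor values are arbitrary)::

    import torch

    x = torch.tensor([[float("nan"), 1.0, 2.0],
                      [1.0, 2.0, 3.0]])

    torch.mean(x)                        # tensor(nan): NaN propagates
    torch.nanmean(x)                     # mean of the five non-NaN entries
    torch.nanmedian(x, dim=0).values     # per-column medians, ignoring NaN
    torch.nanquantile(x, 0.5, dim=1)     # per-row 0.5 quantile, ignoring NaN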
+@overload +def nanquantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: + r""" + nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + + This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values, + computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did + not exist. If all values in a reduced row are ``NaN`` then the quantiles for + that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`. + + Args: + input (Tensor): the input tensor. + q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1] + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.tensor([float('nan'), 1, 2]) + >>> t.quantile(0.5) + tensor(nan) + >>> t.nanquantile(0.5) + tensor(1.5000) + >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]]) + >>> t + tensor([[nan, nan], + [1., 2.]]) + >>> t.nanquantile(0.5, dim=0) + tensor([1., 2.]) + >>> t.nanquantile(0.5, dim=1) + tensor([ nan, 1.5000]) + """ + ... +def nansum(input: Tensor, dim: Optional[Union[_int, _size]] = None, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + nansum(input, *, dtype=None) -> Tensor + + Returns the sum of all elements, treating Not a Numbers (NaNs) as zero. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.tensor([1., 2., float('nan'), 4.]) + >>> torch.nansum(a) + tensor(7.) + + .. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the sum of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`, treating Not a Numbers (NaNs) as zero. + If :attr:`dim` is a list of dimensions, reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> torch.nansum(torch.tensor([1., float("nan")])) + 1.0 + >>> a = torch.tensor([[1, 2], [3., float("nan")]]) + >>> torch.nansum(a) + tensor(6.) 
+ >>> torch.nansum(a, dim=0) + tensor([4., 2.]) + >>> torch.nansum(a, dim=1) + tensor([3., 3.]) + """ + ... +@overload +def narrow(input: Tensor, dim: _int, start: Tensor, length: Union[_int, SymInt]) -> Tensor: + r""" + narrow(input, dim, start, length) -> Tensor + + Returns a new tensor that is a narrowed version of :attr:`input` tensor. The + dimension :attr:`dim` is input from :attr:`start` to ``start + length``. The + returned tensor and :attr:`input` tensor share the same underlying storage. + + Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int or Tensor): index of the element to start the narrowed dimension + from. Can be negative, which means indexing from the end of `dim`. If + `Tensor`, it must be an 0-dim integral `Tensor` (bools not allowed) + length (int): length of the narrowed dimension, must be weakly positive + + Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> torch.narrow(x, -1, torch.tensor(-1), 1) + tensor([[3], + [6], + [9]]) + """ + ... +@overload +def narrow(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt]) -> Tensor: + r""" + narrow(input, dim, start, length) -> Tensor + + Returns a new tensor that is a narrowed version of :attr:`input` tensor. The + dimension :attr:`dim` is input from :attr:`start` to ``start + length``. The + returned tensor and :attr:`input` tensor share the same underlying storage. + + Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int or Tensor): index of the element to start the narrowed dimension + from. Can be negative, which means indexing from the end of `dim`. If + `Tensor`, it must be an 0-dim integral `Tensor` (bools not allowed) + length (int): length of the narrowed dimension, must be weakly positive + + Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> torch.narrow(x, -1, torch.tensor(-1), 1) + tensor([[3], + [6], + [9]]) + """ + ... +def narrow_copy(input: Tensor, dim: _int, start: Union[_int, SymInt], length: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: + r""" + narrow_copy(input, dim, start, length, *, out=None) -> Tensor + + Same as :meth:`Tensor.narrow` except this returns a copy rather + than shared storage. This is primarily for sparse tensors, which + do not have a shared-storage narrow method. + + Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int): index of the element to start the narrowed dimension from. Can + be negative, which means indexing from the end of `dim` + length (int): length of the narrowed dimension, must be weakly positive + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow_copy(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow_copy(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2) + >>> torch.narrow_copy(s, 0, 0, 1) + tensor(indices=tensor([[0, 0], + [0, 1]]), + values=tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]), + size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo) + + .. seealso:: + + :func:`torch.narrow` for a non copy variant + """ + ... +def native_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], running_mean: Optional[Tensor], running_var: Optional[Tensor], training: _bool, momentum: _float, eps: _float, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> Tuple[Tensor, Tensor, Tensor]: ... +def native_channel_shuffle(input: Tensor, groups: Union[_int, SymInt]) -> Tensor: ... +def native_dropout(input: Tensor, p: _float, train: Optional[_bool]) -> Tuple[Tensor, Tensor]: ... +def native_group_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], N: Union[_int, SymInt], C: Union[_int, SymInt], HxW: Union[_int, SymInt], group: _int, eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +def native_layer_norm(input: Tensor, normalized_shape: Sequence[Union[_int, SymInt]], weight: Optional[Tensor], bias: Optional[Tensor], eps: _float) -> Tuple[Tensor, Tensor, Tensor]: ... +@overload +def native_norm(input: Tensor, p: Optional[Union[Number, _complex]], dim: Union[_int, _size], keepdim: _bool, dtype: Optional[_dtype]) -> Tensor: ... +@overload +def native_norm(input: Tensor, p: Union[Number, _complex] = 2) -> Tensor: ... +@overload +def ne(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ne(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \neq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [True, False]]) + """ + ... +@overload +def ne(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + ne(input, other, *, out=None) -> Tensor + + Computes :math:`\text{input} \neq \text{other}` element-wise. + + + The second argument can be a number or a tensor whose shape is + :ref:`broadcastable ` with the first argument. + + Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere + + Example:: + + >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [True, False]]) + """ + ... +def neg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + neg(input, *, out=None) -> Tensor + + Returns a new tensor with the negative of the elements of :attr:`input`. + + .. math:: + \text{out} = -1 \times \text{input} + + Args: + input (Tensor): the input tensor. 
+ + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(5) + >>> a + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + >>> torch.neg(a) + tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940]) + """ + ... +def neg_(input: Tensor) -> Tensor: ... +def negative(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + negative(input, *, out=None) -> Tensor + + Alias for :func:`torch.neg` + """ + ... +def negative_(input: Tensor) -> Tensor: ... +def nextafter(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + nextafter(input, other, *, out=None) -> Tensor + + Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise. + + The shapes of ``input`` and ``other`` must be + :ref:`broadcastable `. + + Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> eps = torch.finfo(torch.float32).eps + >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps]) + tensor([True, True]) + """ + ... +@overload +def nonzero(input: Tensor, *, as_tuple: Literal[False] = False, out: Optional[Tensor] = None) -> Tensor: + r""" + nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors + + .. note:: + :func:`torch.nonzero(..., as_tuple=False) ` (default) returns a + 2-D tensor where each row is the index for a nonzero value. + + :func:`torch.nonzero(..., as_tuple=True) ` returns a tuple of 1-D + index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]`` + gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor + contains nonzero indices for a certain dimension. + + See below for more details on the two behaviors. + + When :attr:`input` is on CUDA, :func:`torch.nonzero() ` causes + host-device synchronization. + + **When** :attr:`as_tuple` **is** ``False`` **(default)**: + + Returns a tensor containing the indices of all non-zero elements of + :attr:`input`. Each row in the result contains the indices of a non-zero + element in :attr:`input`. The result is sorted lexicographically, with + the last index changing the fastest (C-style). + + If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor + :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + **When** :attr:`as_tuple` **is** ``True``: + + Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`, + each containing the indices (in that dimension) of all non-zero elements of + :attr:`input` . + + If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n` + tensors of size :math:`z`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + As a special case, when :attr:`input` has zero dimensions and a nonzero scalar + value, it is treated as a one-dimensional tensor with one element. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (LongTensor, optional): the output tensor containing indices + + Returns: + LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output + tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for + each dimension, containing the indices of each nonzero element along that + dimension. 
+ + Example:: + + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1])) + tensor([[ 0], + [ 1], + [ 2], + [ 4]]) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]])) + tensor([[ 0, 0], + [ 1, 1], + [ 2, 2], + [ 3, 3]]) + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True) + (tensor([0, 1, 2, 4]),) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True) + (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3])) + >>> torch.nonzero(torch.tensor(5), as_tuple=True) + (tensor([0]),) + """ + ... +@overload +def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: + r""" + nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors + + .. note:: + :func:`torch.nonzero(..., as_tuple=False) ` (default) returns a + 2-D tensor where each row is the index for a nonzero value. + + :func:`torch.nonzero(..., as_tuple=True) ` returns a tuple of 1-D + index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]`` + gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor + contains nonzero indices for a certain dimension. + + See below for more details on the two behaviors. + + When :attr:`input` is on CUDA, :func:`torch.nonzero() ` causes + host-device synchronization. + + **When** :attr:`as_tuple` **is** ``False`` **(default)**: + + Returns a tensor containing the indices of all non-zero elements of + :attr:`input`. Each row in the result contains the indices of a non-zero + element in :attr:`input`. The result is sorted lexicographically, with + the last index changing the fastest (C-style). + + If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor + :attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + **When** :attr:`as_tuple` **is** ``True``: + + Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`, + each containing the indices (in that dimension) of all non-zero elements of + :attr:`input` . + + If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n` + tensors of size :math:`z`, where :math:`z` is the total number of + non-zero elements in the :attr:`input` tensor. + + As a special case, when :attr:`input` has zero dimensions and a nonzero scalar + value, it is treated as a one-dimensional tensor with one element. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (LongTensor, optional): the output tensor containing indices + + Returns: + LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output + tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for + each dimension, containing the indices of each nonzero element along that + dimension. + + Example:: + + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1])) + tensor([[ 0], + [ 1], + [ 2], + [ 4]]) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]])) + tensor([[ 0, 0], + [ 1, 1], + [ 2, 2], + [ 3, 3]]) + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True) + (tensor([0, 1, 2, 4]),) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... 
[0.0, 0.0, 0.0,-0.4]]), as_tuple=True) + (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3])) + >>> torch.nonzero(torch.tensor(5), as_tuple=True) + (tensor([0]),) + """ + ... +def nonzero_static(input: Tensor, *, size: _int, fill_value: _int = -1, out: Optional[Tensor] = None) -> Tensor: ... +def norm_except_dim(v: Tensor, pow: _int = 2, dim: _int = 0) -> Tensor: ... +@overload +def normal(mean: Tensor, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + normal(mean, std, *, generator=None, out=None) -> Tensor + + Returns a tensor of random numbers drawn from separate normal distributions + whose mean and standard deviation are given. + + The :attr:`mean` is a tensor with the mean of + each output element's normal distribution + + The :attr:`std` is a tensor with the standard deviation of + each output element's normal distribution + + The shapes of :attr:`mean` and :attr:`std` don't need to match, but the + total number of elements in each tensor need to be the same. + + .. note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + + .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + + Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + + .. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means are shared among all drawn + elements. + + Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + + .. function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the standard deviations are shared among + all drawn elements. + + Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + + Keyword args: + out (Tensor, optional): the output tensor + + Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + + .. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means and standard deviations are shared + among all drawn elements. The resulting tensor has size given by :attr:`size`. + + Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) + """ + ... 
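All four ``torch.normal`` overloads (this one and the three that follow) carry the same docstring describing four call patterns; a compact side-by-side sketch may be easier to scan (illustrative only, not part of the stub file; outputs are random)::

    import torch

    g = torch.Generator().manual_seed(0)   # optional, for reproducible sampling

    torch.normal(torch.arange(1., 6.), torch.ones(5), generator=g)   # per-element mean and std
    torch.normal(0.0, torch.ones(5), generator=g)                    # shared mean, per-element std
    torch.normal(torch.arange(1., 6.), 1.0, generator=g)             # per-element mean, shared std
    torch.normal(2.0, 3.0, size=(2, 3), generator=g)                 # shared mean/std, explicit shape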
+@overload +def normal(mean: Tensor, std: _float = 1, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + normal(mean, std, *, generator=None, out=None) -> Tensor + + Returns a tensor of random numbers drawn from separate normal distributions + whose mean and standard deviation are given. + + The :attr:`mean` is a tensor with the mean of + each output element's normal distribution + + The :attr:`std` is a tensor with the standard deviation of + each output element's normal distribution + + The shapes of :attr:`mean` and :attr:`std` don't need to match, but the + total number of elements in each tensor need to be the same. + + .. note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + + .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + + Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + + .. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means are shared among all drawn + elements. + + Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + + .. function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the standard deviations are shared among + all drawn elements. + + Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + + Keyword args: + out (Tensor, optional): the output tensor + + Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + + .. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means and standard deviations are shared + among all drawn elements. The resulting tensor has size given by :attr:`size`. + + Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) + """ + ... +@overload +def normal(mean: _float, std: Tensor, *, generator: Optional[Generator] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + normal(mean, std, *, generator=None, out=None) -> Tensor + + Returns a tensor of random numbers drawn from separate normal distributions + whose mean and standard deviation are given. 
+ + The :attr:`mean` is a tensor with the mean of + each output element's normal distribution + + The :attr:`std` is a tensor with the standard deviation of + each output element's normal distribution + + The shapes of :attr:`mean` and :attr:`std` don't need to match, but the + total number of elements in each tensor need to be the same. + + .. note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + + .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + + Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + + .. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means are shared among all drawn + elements. + + Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + + .. function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the standard deviations are shared among + all drawn elements. + + Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + + Keyword args: + out (Tensor, optional): the output tensor + + Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + + .. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means and standard deviations are shared + among all drawn elements. The resulting tensor has size given by :attr:`size`. + + Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) + """ + ... +@overload +def normal(mean: _float, std: _float, size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator] = None, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + normal(mean, std, *, generator=None, out=None) -> Tensor + + Returns a tensor of random numbers drawn from separate normal distributions + whose mean and standard deviation are given. + + The :attr:`mean` is a tensor with the mean of + each output element's normal distribution + + The :attr:`std` is a tensor with the standard deviation of + each output element's normal distribution + + The shapes of :attr:`mean` and :attr:`std` don't need to match, but the + total number of elements in each tensor need to be the same. + + .. 
note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + + .. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + + Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + + .. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means are shared among all drawn + elements. + + Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + + .. function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the standard deviations are shared among + all drawn elements. + + Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + + Keyword args: + out (Tensor, optional): the output tensor + + Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + + .. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + + Similar to the function above, but the means and standard deviations are shared + among all drawn elements. The resulting tensor has size given by :attr:`size`. + + Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) + """ + ... +@overload +def not_equal(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + not_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.ne`. + """ + ... +@overload +def not_equal(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + not_equal(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.ne`. + """ + ... +@overload +def nuclear_norm(input: Tensor, dim: Union[_int, _size], keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +@overload +def nuclear_norm(input: Tensor, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: ... +def numel(self: Tensor) -> _int: + r""" + numel(input) -> int + + Returns the total number of elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> a = torch.randn(1, 2, 3, 4, 5) + >>> torch.numel(a) + 120 + >>> a = torch.zeros(4,4) + >>> torch.numel(a) + 16 + """ + ... 
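The two ``nonzero`` behaviours documented above differ only in the shape of the result, and the ``as_tuple=True`` form is what enables advanced indexing; a short sketch alongside ``numel`` (illustrative only, not part of the stub file)::

    import torch

    t = torch.tensor([[0.6, 0.0],
                      [0.0, 0.4]])

    torch.nonzero(t)                         # (z, n) LongTensor of row/column index pairs
    idx = torch.nonzero(t, as_tuple=True)    # tuple of n 1-D index tensors
    t[idx]                                   # advanced indexing: tensor([0.6000, 0.4000])
    torch.numel(t)                           # total number of elements: 4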
+@overload +def ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + """ + ... +@overload +def ones(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + """ + ... 
+@overload +def ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + """ + ... +@overload +def ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword arguments: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + """ + ... 
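The ``ones`` overloads above accept the output shape either as variadic integer arguments or as a single sequence, with an additional named-tensor form; for example (illustrative only, not part of the stub file)::

    import torch

    torch.ones(2, 3)                          # variadic size arguments
    torch.ones((2, 3), dtype=torch.int64)     # size given as a single tuple
    torch.ones(2, 3, names=("N", "C"))        # named-tensor overload (prototype feature)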
+def ones_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor filled with the scalar value `1`, with the same size as + :attr:`input`. ``torch.ones_like(input)`` is equivalent to + ``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + .. warning:: + As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, + the old ``torch.ones_like(input, out=output)`` is equivalent to + ``torch.ones(input.size(), out=output)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword arguments: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + Example:: + + >>> input = torch.empty(2, 3) + >>> torch.ones_like(input) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + """ + ... +def orgqr(input: Tensor, input2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + orgqr(input, tau) -> Tensor + + Alias for :func:`torch.linalg.householder_product`. + """ + ... +def ormqr(input: Tensor, input2: Tensor, input3: Tensor, left: _bool = True, transpose: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor + + Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix. + + Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`, + where `Q` is represented using Householder reflectors `(input, tau)`. + See `Representation of Orthogonal or Unitary Matrices`_ for further details. + + If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`. + When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`. + It has size :math:`n \times n` otherwise. + If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op. + + Supports inputs of float, double, cfloat and cdouble dtypes. + Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions. + + .. seealso:: + :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q` + from the QR decomposition. + + .. note:: + This function supports backward but it is only fast when ``(input, tau)`` do not require gradients + and/or ``tau.size(-1)`` is very small. 
+ `` + + Args: + input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions + and `mn` equals to `m` or `n` depending on the :attr:`left`. + tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions. + other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + left (bool): controls the order of multiplication. + transpose (bool): controls whether the matrix `Q` is conjugate transposed or not. + + Keyword args: + out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`. + + .. _Representation of Orthogonal or Unitary Matrices: + https://www.netlib.org/lapack/lug/node128.html + """ + ... +def outer(input: Tensor, vec2: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + outer(input, vec2, *, out=None) -> Tensor + + Outer product of :attr:`input` and :attr:`vec2`. + If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of + size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`. + + .. note:: This function does not :ref:`broadcast `. + + Args: + input (Tensor): 1-D input vector + vec2 (Tensor): 1-D input vector + + Keyword args: + out (Tensor, optional): optional output matrix + + Example:: + + >>> v1 = torch.arange(1., 5.) + >>> v2 = torch.arange(1., 4.) + >>> torch.outer(v1, v2) + tensor([[ 1., 2., 3.], + [ 2., 4., 6.], + [ 3., 6., 9.], + [ 4., 8., 12.]]) + """ + ... +def pairwise_distance(x1: Tensor, x2: Tensor, p: _float = 2, eps: _float = 1e-06, keepdim: _bool = False) -> Tensor: ... +def pdist(input: Tensor, p: _float = 2) -> Tensor: ... +def permute(input: Tensor, dims: _size) -> Tensor: + r""" + permute(input, dims) -> Tensor + + Returns a view of the original tensor :attr:`input` with its dimensions permuted. + + Args: + input (Tensor): the input tensor. + dims (tuple of int): The desired ordering of dimensions + + Example: + >>> x = torch.randn(2, 3, 5) + >>> x.size() + torch.Size([2, 3, 5]) + >>> torch.permute(x, (2, 0, 1)).size() + torch.Size([5, 2, 3]) + """ + ... +def permute_copy(input: Tensor, dims: _size, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.permute`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def pinverse(input: Tensor, rcond: _float = 1e-15) -> Tensor: + r""" + pinverse(input, rcond=1e-15) -> Tensor + + Alias for :func:`torch.linalg.pinv` + """ + ... +def pixel_shuffle(input: Tensor, upscale_factor: _int) -> Tensor: ... +def pixel_unshuffle(input: Tensor, downscale_factor: _int) -> Tensor: ... +def poisson(input: Tensor, generator: Optional[Generator] = None) -> Tensor: + r""" + poisson(input, generator=None) -> Tensor + + Returns a tensor of the same size as :attr:`input` with each element + sampled from a Poisson distribution with rate parameter given by the corresponding + element in :attr:`input` i.e., + + .. math:: + \text{out}_i \sim \text{Poisson}(\text{input}_i) + + :attr:`input` must be non-negative. + + Args: + input (Tensor): the input tensor containing the rates of the Poisson distribution + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + + Example:: + + >>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5 + >>> torch.poisson(rates) + tensor([[9., 1., 3., 5.], + [8., 6., 6., 0.], + [0., 4., 5., 3.], + [2., 1., 4., 2.]]) + """ + ... 
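A few of the functions above are easiest to grasp from a one-line call each (illustrative sketch, not part of the stub file; the ``poisson`` output is random)::

    import torch

    torch.outer(torch.arange(1., 4.), torch.arange(1., 3.))   # (3, 2) outer product
    x = torch.randn(2, 3, 5)
    torch.permute(x, (2, 0, 1)).shape                          # torch.Size([5, 2, 3])
    torch.poisson(torch.rand(3) * 5)                           # per-element Poisson samples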
+def poisson_nll_loss(input: Tensor, target: Tensor, log_input: _bool, full: _bool, eps: _float, reduction: _int) -> Tensor: ... +def polar(abs: Tensor, angle: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + polar(abs, angle, *, out=None) -> Tensor + + Constructs a complex tensor whose elements are Cartesian coordinates + corresponding to the polar coordinates with absolute value :attr:`abs` and angle + :attr:`angle`. + + .. math:: + \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j + + .. note:: + `torch.polar` is similar to + `std::polar `_ + and does not compute the polar decomposition + of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do. + The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is + infinite. + + + Args: + abs (Tensor): The absolute value the complex tensor. Must be float or double. + angle (Tensor): The angle of the complex tensor. Must be same dtype as + :attr:`abs`. + + Keyword args: + out (Tensor): If the inputs are ``torch.float32``, must be + ``torch.complex64``. If the inputs are ``torch.float64``, must be + ``torch.complex128``. + + Example:: + + >>> import numpy as np + >>> abs = torch.tensor([1, 2], dtype=torch.float64) + >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64) + >>> z = torch.polar(abs, angle) + >>> z + tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128) + """ + ... +def polygamma(n: _int, input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + polygamma(n, input, *, out=None) -> Tensor + + Alias for :func:`torch.special.polygamma`. + """ + ... +def positive(input: Tensor) -> Tensor: + r""" + positive(input) -> Tensor + + Returns :attr:`input`. + Throws a runtime error if :attr:`input` is a bool tensor. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> t = torch.randn(5) + >>> t + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + >>> torch.positive(t) + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + """ + ... +@overload +def pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + pow(input, exponent, *, out=None) -> Tensor + + Takes the power of each element in :attr:`input` with :attr:`exponent` and + returns a tensor with the result. + + :attr:`exponent` can be either a single ``float`` number or a `Tensor` + with the same number of elements as :attr:`input`. + + When :attr:`exponent` is a scalar value, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ \text{exponent} + + When :attr:`exponent` is a tensor, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ {\text{exponent}_i} + + When :attr:`exponent` is a tensor, the shapes of :attr:`input` + and :attr:`exponent` must be :ref:`broadcastable `. + + Args: + input (Tensor): the input tensor. + exponent (float or tensor): the exponent value + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.4331, 1.2475, 0.6834, -0.2791]) + >>> torch.pow(a, 2) + tensor([ 0.1875, 1.5561, 0.4670, 0.0779]) + >>> exp = torch.arange(1., 5.) + + >>> a = torch.arange(1., 5.) + >>> a + tensor([ 1., 2., 3., 4.]) + >>> exp + tensor([ 1., 2., 3., 4.]) + >>> torch.pow(a, exp) + tensor([ 1., 4., 27., 256.]) + + .. function:: pow(self, exponent, *, out=None) -> Tensor + :noindex: + + :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor. 
+ The returned tensor :attr:`out` is of the same shape as :attr:`exponent` + + The operation applied is: + + .. math:: + \text{out}_i = \text{self} ^ {\text{exponent}_i} + + Args: + self (float): the scalar base value for the power operation + exponent (Tensor): the exponent tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> exp = torch.arange(1., 5.) + >>> base = 2 + >>> torch.pow(base, exp) + tensor([ 2., 4., 8., 16.]) + """ + ... +@overload +def pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + pow(input, exponent, *, out=None) -> Tensor + + Takes the power of each element in :attr:`input` with :attr:`exponent` and + returns a tensor with the result. + + :attr:`exponent` can be either a single ``float`` number or a `Tensor` + with the same number of elements as :attr:`input`. + + When :attr:`exponent` is a scalar value, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ \text{exponent} + + When :attr:`exponent` is a tensor, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ {\text{exponent}_i} + + When :attr:`exponent` is a tensor, the shapes of :attr:`input` + and :attr:`exponent` must be :ref:`broadcastable `. + + Args: + input (Tensor): the input tensor. + exponent (float or tensor): the exponent value + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.4331, 1.2475, 0.6834, -0.2791]) + >>> torch.pow(a, 2) + tensor([ 0.1875, 1.5561, 0.4670, 0.0779]) + >>> exp = torch.arange(1., 5.) + + >>> a = torch.arange(1., 5.) + >>> a + tensor([ 1., 2., 3., 4.]) + >>> exp + tensor([ 1., 2., 3., 4.]) + >>> torch.pow(a, exp) + tensor([ 1., 4., 27., 256.]) + + .. function:: pow(self, exponent, *, out=None) -> Tensor + :noindex: + + :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor. + The returned tensor :attr:`out` is of the same shape as :attr:`exponent` + + The operation applied is: + + .. math:: + \text{out}_i = \text{self} ^ {\text{exponent}_i} + + Args: + self (float): the scalar base value for the power operation + exponent (Tensor): the exponent tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> exp = torch.arange(1., 5.) + >>> base = 2 + >>> torch.pow(base, exp) + tensor([ 2., 4., 8., 16.]) + """ + ... +@overload +def pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + pow(input, exponent, *, out=None) -> Tensor + + Takes the power of each element in :attr:`input` with :attr:`exponent` and + returns a tensor with the result. + + :attr:`exponent` can be either a single ``float`` number or a `Tensor` + with the same number of elements as :attr:`input`. + + When :attr:`exponent` is a scalar value, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ \text{exponent} + + When :attr:`exponent` is a tensor, the operation applied is: + + .. math:: + \text{out}_i = x_i ^ {\text{exponent}_i} + + When :attr:`exponent` is a tensor, the shapes of :attr:`input` + and :attr:`exponent` must be :ref:`broadcastable `. + + Args: + input (Tensor): the input tensor. + exponent (float or tensor): the exponent value + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.4331, 1.2475, 0.6834, -0.2791]) + >>> torch.pow(a, 2) + tensor([ 0.1875, 1.5561, 0.4670, 0.0779]) + >>> exp = torch.arange(1., 5.) 
+ + >>> a = torch.arange(1., 5.) + >>> a + tensor([ 1., 2., 3., 4.]) + >>> exp + tensor([ 1., 2., 3., 4.]) + >>> torch.pow(a, exp) + tensor([ 1., 4., 27., 256.]) + + .. function:: pow(self, exponent, *, out=None) -> Tensor + :noindex: + + :attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor. + The returned tensor :attr:`out` is of the same shape as :attr:`exponent` + + The operation applied is: + + .. math:: + \text{out}_i = \text{self} ^ {\text{exponent}_i} + + Args: + self (float): the scalar base value for the power operation + exponent (Tensor): the exponent tensor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> exp = torch.arange(1., 5.) + >>> base = 2 + >>> torch.pow(base, exp) + tensor([ 2., 4., 8., 16.]) + """ + ... +def prelu(input: Tensor, weight: Tensor) -> Tensor: ... +@overload +def prod(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + prod(input, *, dtype=None) -> Tensor + + Returns the product of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[-0.8020, 0.5428, -1.5854]]) + >>> torch.prod(a) + tensor(0.6902) + + .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the product of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensor having 1 fewer dimension than :attr:`input`. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 2) + >>> a + tensor([[ 0.5261, -0.3837], + [ 1.1857, -0.2498], + [-1.1646, 0.0705], + [ 1.1131, -1.0629]]) + >>> torch.prod(a, 1) + tensor([-0.2018, -0.2962, -0.0821, -1.1831]) + """ + ... +@overload +def prod(input: Tensor, dim: _int, keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + prod(input, *, dtype=None) -> Tensor + + Returns the product of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[-0.8020, 0.5428, -1.5854]]) + >>> torch.prod(a) + tensor(0.6902) + + .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the product of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. 
+ + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensor having 1 fewer dimension than :attr:`input`. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 2) + >>> a + tensor([[ 0.5261, -0.3837], + [ 1.1857, -0.2498], + [-1.1646, 0.0705], + [ 1.1131, -1.0629]]) + >>> torch.prod(a, 1) + tensor([-0.2018, -0.2962, -0.0821, -1.1831]) + """ + ... +@overload +def prod(input: Tensor, dim: Union[str, ellipsis, None], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + prod(input, *, dtype=None) -> Tensor + + Returns the product of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[-0.8020, 0.5428, -1.5854]]) + >>> torch.prod(a) + tensor(0.6902) + + .. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the product of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in + the output tensor having 1 fewer dimension than :attr:`input`. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 2) + >>> a + tensor([[ 0.5261, -0.3837], + [ 1.1857, -0.2498], + [-1.1646, 0.0705], + [ 1.1131, -1.0629]]) + >>> torch.prod(a, 1) + tensor([-0.2018, -0.2962, -0.0821, -1.1831]) + """ + ... +def promote_types(type1: _dtype, type2: _dtype) -> _dtype: + r""" + promote_types(type1, type2) -> dtype + + Returns the :class:`torch.dtype` with the smallest size and scalar kind that is + not smaller nor of lower kind than either `type1` or `type2`. See type promotion + :ref:`documentation ` for more information on the type + promotion logic. + + Args: + type1 (:class:`torch.dtype`) + type2 (:class:`torch.dtype`) + + Example:: + + >>> torch.promote_types(torch.int32, torch.float32) + torch.float32 + >>> torch.promote_types(torch.uint8, torch.long) + torch.long + """ + ... +def put(input: Tensor, index: Tensor, source: Tensor, accumulate: _bool = False) -> Tensor: ... +def q_per_channel_axis(input: Tensor) -> _int: ... 
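+# Editorial sketch (illustrative only, not part of the generated stub): a minimal,
+# hedged example of the torch.prod overloads and torch.promote_types documented above.
+# The input values below are arbitrary assumptions chosen for demonstration.
+import torch
+
+x = torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
+# Reducing over dim=1; keepdim=True keeps the reduced dimension with size 1.
+assert torch.prod(x, dim=1).shape == (2,)
+assert torch.prod(x, dim=1, keepdim=True).shape == (2, 1)
+# Casting to a wider dtype before the reduction guards against integer overflow.
+big = torch.prod(torch.tensor([100_000, 100_000], dtype=torch.int32), dtype=torch.int64)
+assert int(big) == 10_000_000_000
+# promote_types picks the smallest dtype not lower in kind or size than either input.
+assert torch.promote_types(torch.int32, torch.float32) == torch.float32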
+def q_per_channel_scales(input: Tensor) -> Tensor: ... +def q_per_channel_zero_points(input: Tensor) -> Tensor: ... +def q_scale(input: Tensor) -> _float: ... +def q_zero_point(input: Tensor) -> _int: ... +def qr(input: Tensor, some: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.qr: + r""" + qr(input, some=True, *, out=None) -> (Tensor, Tensor) + + Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`, + and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R` + with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and + :math:`R` being an upper triangular matrix or batch of upper triangular matrices. + + If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization. + Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization. + + .. warning:: + + :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr` + and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been + replaced with a string parameter :attr:`mode`. + + ``Q, R = torch.qr(A)`` should be replaced with + + .. code:: python + + Q, R = torch.linalg.qr(A) + + ``Q, R = torch.qr(A, some=False)`` should be replaced with + + .. code:: python + + Q, R = torch.linalg.qr(A, mode="complete") + + .. warning:: + If you plan to backpropagate through QR, note that the current backward implementation + is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))` + columns of :attr:`input` are linearly independent. + This behavior will probably change once QR supports pivoting. + + .. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs, + and may produce different (valid) decompositions on different device types + or different platforms. + + Args: + input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more + batch dimensions consisting of matrices of dimension :math:`m \times n`. + some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for + complete QR decomposition. If `k = min(m, n)` then: + + * ``some=True`` : returns `(Q, R)` with dimensions (m, k), (k, n) (default) + + * ``'some=False'``: returns `(Q, R)` with dimensions (m, m), (m, n) + + Keyword args: + out (tuple, optional): tuple of `Q` and `R` tensors. + The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above. + + Example:: + + >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]]) + >>> q, r = torch.qr(a) + >>> q + tensor([[-0.8571, 0.3943, 0.3314], + [-0.4286, -0.9029, -0.0343], + [ 0.2857, -0.1714, 0.9429]]) + >>> r + tensor([[ -14.0000, -21.0000, 14.0000], + [ 0.0000, -175.0000, 70.0000], + [ 0.0000, 0.0000, -35.0000]]) + >>> torch.mm(q, r).round() + tensor([[ 12., -51., 4.], + [ 6., 167., -68.], + [ -4., 24., -41.]]) + >>> torch.mm(q.t(), q).round() + tensor([[ 1., 0., 0.], + [ 0., 1., -0.], + [ 0., -0., 1.]]) + >>> a = torch.randn(3, 4, 5) + >>> q, r = torch.qr(a, some=False) + >>> torch.allclose(torch.matmul(q, r), a) + True + >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(5)) + True + """ + ... 
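+# Editorial sketch (illustrative only, not part of the generated stub): the replacement
+# pattern recommended by the deprecation note in the torch.qr docstring above, using
+# torch.linalg.qr. The matrix below is an arbitrary illustrative choice.
+import torch
+
+a = torch.tensor([[12.0, -51.0, 4.0], [6.0, 167.0, -68.0], [-4.0, 24.0, -41.0]])
+# Thin (reduced) factorization, the analogue of torch.qr(a, some=True).
+q, r = torch.linalg.qr(a)  # mode="reduced" is the default
+assert torch.allclose(q @ r, a, atol=1e-3)
+# Complete factorization, the analogue of torch.qr(a, some=False).
+q_full, r_full = torch.linalg.qr(a, mode="complete")
+assert torch.allclose(q_full @ r_full, a, atol=1e-3)
+# Q has orthonormal columns, so Q^T @ Q is (approximately) the identity.
+assert torch.allclose(q.T @ q, torch.eye(3), atol=1e-5)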
+@overload +def quantile(input: Tensor, q: Tensor, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: + r""" + quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + + Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`. + + To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location + of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with + indices ``i`` and ``j`` in the sorted order, result is computed according to the given + :attr:`interpolation` method as follows: + + - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index. + - ``lower``: ``a``. + - ``higher``: ``b``. + - ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions). + - ``midpoint``: ``(a + b) / 2``. + + If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size + equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction. + + .. note:: + By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation. + + Args: + input (Tensor): the input tensor. + q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1]. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(2, 3) + >>> a + tensor([[ 0.0795, -1.2117, 0.9765], + [ 1.1707, 0.6706, 0.4884]]) + >>> q = torch.tensor([0.25, 0.5, 0.75]) + >>> torch.quantile(a, q, dim=1, keepdim=True) + tensor([[[-0.5661], + [ 0.5795]], + + [[ 0.0795], + [ 0.6706]], + + [[ 0.5280], + [ 0.9206]]]) + >>> torch.quantile(a, q, dim=1, keepdim=True).shape + torch.Size([3, 2, 1]) + >>> a = torch.arange(4.) + >>> a + tensor([0., 1., 2., 3.]) + >>> torch.quantile(a, 0.6, interpolation='linear') + tensor(1.8000) + >>> torch.quantile(a, 0.6, interpolation='lower') + tensor(1.) + >>> torch.quantile(a, 0.6, interpolation='higher') + tensor(2.) + >>> torch.quantile(a, 0.6, interpolation='midpoint') + tensor(1.5000) + >>> torch.quantile(a, 0.6, interpolation='nearest') + tensor(2.) + >>> torch.quantile(a, 0.4, interpolation='nearest') + tensor(1.) + """ + ... +@overload +def quantile(input: Tensor, q: _float, dim: Optional[_int] = None, keepdim: _bool = False, *, interpolation: str = "linear", out: Optional[Tensor] = None) -> Tensor: + r""" + quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + + Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`. + + To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location + of the quantile in the sorted input. 
If the quantile lies between two data points ``a < b`` with + indices ``i`` and ``j`` in the sorted order, result is computed according to the given + :attr:`interpolation` method as follows: + + - ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index. + - ``lower``: ``a``. + - ``higher``: ``b``. + - ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions). + - ``midpoint``: ``(a + b) / 2``. + + If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size + equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction. + + .. note:: + By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation. + + Args: + input (Tensor): the input tensor. + q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1]. + dim (int): the dimension to reduce. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(2, 3) + >>> a + tensor([[ 0.0795, -1.2117, 0.9765], + [ 1.1707, 0.6706, 0.4884]]) + >>> q = torch.tensor([0.25, 0.5, 0.75]) + >>> torch.quantile(a, q, dim=1, keepdim=True) + tensor([[[-0.5661], + [ 0.5795]], + + [[ 0.0795], + [ 0.6706]], + + [[ 0.5280], + [ 0.9206]]]) + >>> torch.quantile(a, q, dim=1, keepdim=True).shape + torch.Size([3, 2, 1]) + >>> a = torch.arange(4.) + >>> a + tensor([0., 1., 2., 3.]) + >>> torch.quantile(a, 0.6, interpolation='linear') + tensor(1.8000) + >>> torch.quantile(a, 0.6, interpolation='lower') + tensor(1.) + >>> torch.quantile(a, 0.6, interpolation='higher') + tensor(2.) + >>> torch.quantile(a, 0.6, interpolation='midpoint') + tensor(1.5000) + >>> torch.quantile(a, 0.6, interpolation='nearest') + tensor(2.) + >>> torch.quantile(a, 0.4, interpolation='nearest') + tensor(1.) + """ + ... +def quantize_per_channel(input: Tensor, scales: Tensor, zero_points: Tensor, axis: _int, dtype: _dtype) -> Tensor: + r""" + quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor + + Converts a float tensor to a per-channel quantized tensor with given scales and zero points. + + Arguments: + input (Tensor): float tensor to quantize + scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)`` + zero_points (int): integer 1D tensor of offset to use, size should match ``input.size(axis)`` + axis (int): dimension on which apply per-channel quantization + dtype (:class:`torch.dtype`): the desired data type of returned tensor. 
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + + Returns: + Tensor: A newly quantized tensor + + Example:: + + >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]]) + >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8) + tensor([[-1., 0.], + [ 1., 2.]], size=(2, 2), dtype=torch.quint8, + quantization_scheme=torch.per_channel_affine, + scale=tensor([0.1000, 0.0100], dtype=torch.float64), + zero_point=tensor([10, 0]), axis=0) + >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr() + tensor([[ 0, 10], + [100, 200]], dtype=torch.uint8) + """ + ... +@overload +def quantize_per_tensor(input: Tensor, scale: Tensor, zero_point: Tensor, dtype: _dtype) -> Tensor: + r""" + quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor + + Converts a float tensor to a quantized tensor with given scale and zero point. + + Arguments: + input (Tensor): float tensor or list of tensors to quantize + scale (float or Tensor): scale to apply in quantization formula + zero_point (int or Tensor): offset in integer value that maps to float zero + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + + Returns: + Tensor: A newly quantized tensor or list of quantized tensors. + + Example:: + + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr() + tensor([ 0, 10, 20, 30], dtype=torch.uint8) + >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])], + >>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8) + (tensor([-1., 0.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10), + tensor([-2., 2.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20)) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10) + """ + ... +@overload +def quantize_per_tensor(input: Tensor, scale: _float, zero_point: _int, dtype: _dtype) -> Tensor: + r""" + quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor + + Converts a float tensor to a quantized tensor with given scale and zero point. + + Arguments: + input (Tensor): float tensor or list of tensors to quantize + scale (float or Tensor): scale to apply in quantization formula + zero_point (int or Tensor): offset in integer value that maps to float zero + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + + Returns: + Tensor: A newly quantized tensor or list of quantized tensors. 
+ + Example:: + + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr() + tensor([ 0, 10, 20, 30], dtype=torch.uint8) + >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])], + >>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8) + (tensor([-1., 0.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10), + tensor([-2., 2.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20)) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10) + """ + ... +@overload +def quantize_per_tensor(tensors: Union[Tuple[Tensor, ...], List[Tensor]], scales: Tensor, zero_points: Tensor, dtype: _dtype) -> Tuple[Tensor, ...]: + r""" + quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor + + Converts a float tensor to a quantized tensor with given scale and zero point. + + Arguments: + input (Tensor): float tensor or list of tensors to quantize + scale (float or Tensor): scale to apply in quantization formula + zero_point (int or Tensor): offset in integer value that maps to float zero + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + + Returns: + Tensor: A newly quantized tensor or list of quantized tensors. + + Example:: + + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr() + tensor([ 0, 10, 20, 30], dtype=torch.uint8) + >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])], + >>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8) + (tensor([-1., 0.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10), + tensor([-2., 2.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20)) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10) + """ + ... +def quantize_per_tensor_dynamic(input: Tensor, dtype: _dtype, reduce_range: _bool) -> Tensor: + r""" + quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor + + Converts a float tensor to a quantized tensor with scale and zero_point calculated + dynamically based on the input. + + Arguments: + input (Tensor): float tensor or list of tensors to quantize + dtype (:class:`torch.dtype`): the desired data type of returned tensor. 
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8`` + reduce_range (bool): a flag to indicate whether to reduce the range of quantized + data by 1 bit, it's required to avoid instruction overflow for some hardwares + + Returns: + Tensor: A newly (dynamically) quantized tensor + + Example:: + + >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False) + >>> print(t) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941, + zero_point=85) + >>> t.int_repr() + tensor([ 0, 85, 170, 255], dtype=torch.uint8) + """ + ... +def quantized_batch_norm(input: Tensor, weight: Optional[Tensor], bias: Optional[Tensor], mean: Tensor, var: Tensor, eps: _float, output_scale: _float, output_zero_point: _int) -> Tensor: + r""" + quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor + + Applies batch normalization on a 4D (NCHW) quantized tensor. + + .. math:: + + y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + + Arguments: + input (Tensor): quantized tensor + weight (Tensor): float tensor that corresponds to the gamma, size C + bias (Tensor): float tensor that corresponds to the beta, size C + mean (Tensor): float mean value in batch normalization, size C + var (Tensor): float tensor for variance, size C + eps (float): a value added to the denominator for numerical stability. + output_scale (float): output quantized tensor scale + output_zero_point (int): output quantized tensor zero_point + + Returns: + Tensor: A quantized tensor with batch normalization applied. + + Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2) + tensor([[[[-0.2000, -0.2000], + [ 1.6000, -0.2000]], + + [[-0.4000, -0.4000], + [-0.4000, 0.6000]]], + + + [[[-0.2000, -0.2000], + [-0.2000, -0.2000]], + + [[ 0.6000, -0.4000], + [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2) + """ + ... +def quantized_gru_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ... +def quantized_lstm_cell(input: Tensor, hx: Union[Tuple[Tensor, ...], List[Tensor]], w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tuple[Tensor, Tensor]: ... +def quantized_max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: + r""" + quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor + + Applies a 1D max pooling over an input quantized tensor composed of several input planes. 
+ + Arguments: + input (Tensor): quantized tensor + kernel_size (list of int): the size of the sliding window + stride (``list of int``, optional): the stride of the sliding window + padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 + dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 + ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. + Defaults to False. + + + Returns: + Tensor: A quantized tensor with max_pool1d applied. + + Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_max_pool1d(qx, [2]) + tensor([[0.0000], + [1.5000]], size=(2, 1), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) + """ + ... +def quantized_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: + r""" + quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor + + Applies a 2D max pooling over an input quantized tensor composed of several input planes. + + Arguments: + input (Tensor): quantized tensor + kernel_size (``list of int``): the size of the sliding window + stride (``list of int``, optional): the stride of the sliding window + padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 + dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 + ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. + Defaults to False. + + + Returns: + Tensor: A quantized tensor with max_pool2d applied. + + Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_max_pool2d(qx, [2,2]) + tensor([[[[1.5000]], + + [[1.5000]]], + + + [[[0.0000]], + + [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) + """ + ... +def quantized_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Union[_int, _size] = (), padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: _bool = False) -> Tensor: ... +def quantized_rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ... +def quantized_rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Tensor, b_hh: Tensor, packed_ih: Tensor, packed_hh: Tensor, col_offsets_ih: Tensor, col_offsets_hh: Tensor, scale_ih: Union[Number, _complex], scale_hh: Union[Number, _complex], zero_point_ih: Union[Number, _complex], zero_point_hh: Union[Number, _complex]) -> Tensor: ... +def rad2deg(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + rad2deg(input, *, out=None) -> Tensor + + Returns a new tensor with each of the elements of :attr:`input` + converted from angles in radians to degrees. + + Args: + input (Tensor): the input tensor. + + Keyword arguments: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]) + >>> torch.rad2deg(a) + tensor([[ 180.0233, -180.0233], + [ 359.9894, -359.9894], + [ 89.9544, -89.9544]]) + """ + ... +def rad2deg_(input: Tensor) -> Tensor: ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. 
+ Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. 
+ Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... 
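+# Editorial sketch (illustrative only, not part of the generated stub): a round trip
+# through torch.quantize_per_tensor and the q_scale / q_zero_point accessors documented
+# above. int_repr() appears in the docstring examples above; dequantize() is assumed
+# here to invert the affine mapping.
+import torch
+
+x = torch.tensor([-1.0, 0.0, 1.0, 2.0])
+q = torch.quantize_per_tensor(x, 0.1, 10, torch.quint8)
+# The accessors read back the affine parameters used at quantization time.
+assert abs(torch.q_scale(q) - 0.1) < 1e-6
+assert torch.q_zero_point(q) == 10
+# Stored integer values follow round(x / scale) + zero_point.
+assert q.int_repr().tolist() == [0, 10, 20, 30]
+# Dequantizing maps back to float via (int_repr - zero_point) * scale.
+assert torch.allclose(q.dequantize(), x)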
+@overload +def rand(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. 
+ requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +@overload +def rand(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a tensor filled with random numbers from a uniform distribution + on the interval :math:`[0, 1)` + + The shape of the tensor is defined by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) + """ + ... +def rand_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same size as :attr:`input` that is filled with + random numbers from a uniform distribution on the interval :math:`[0, 1)`. + ``torch.rand_like(input)`` is equivalent to + ``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. 
+ device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... +@overload +def randint(low: _int, high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... +@overload +def randint(high: _int, size: _size, *, generator: Optional[Generator] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. 
+ + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... +@overload +def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... 
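+# Editorial sketch (illustrative only, not part of the generated stub): the torch.randint
+# overloads above draw integers uniformly from [low, high) and default to torch.int64.
+# The bounds and shapes below are arbitrary assumptions.
+import torch
+
+d = torch.randint(3, 10, (4, 4))
+assert d.dtype == torch.int64                     # default integer dtype
+assert int(d.min()) >= 3 and int(d.max()) < 10    # low inclusive, high exclusive
+# The single-bound form draws from [0, high); dtype can be overridden.
+e = torch.randint(5, (2, 2), dtype=torch.int32)
+assert e.dtype == torch.int32 and int(e.max()) < 5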
+@overload +def randint(high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... +@overload +def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. 
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... +@overload +def randint(low: Union[_int, SymInt], high: Union[_int, SymInt], size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint(low=0, high, size, \*, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with random integers generated uniformly + between :attr:`low` (inclusive) and :attr:`high` (exclusive). + + The shape of the tensor is defined by the variable argument :attr:`size`. + + .. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + """ + ... 
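+# Editorial sketch (illustrative only, not part of the generated stub): the `generator`
+# keyword documented in the torch.rand and torch.randint entries above makes sampling
+# reproducible; the seed value below is an arbitrary assumption.
+import torch
+
+g = torch.Generator().manual_seed(0)   # manual_seed returns the generator itself
+u1 = torch.rand(2, 3, generator=g)
+i1 = torch.randint(0, 10, (2, 3), generator=g)
+# Re-seeding replays exactly the same uniform and integer samples.
+g.manual_seed(0)
+u2 = torch.rand(2, 3, generator=g)
+i2 = torch.randint(0, 10, (2, 3), generator=g)
+assert torch.equal(u1, u2) and torch.equal(i1, i2)
+assert (u1 >= 0).all() and (u1 < 1).all()         # samples lie in [0, 1)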
+@overload +def randint_like(input: Tensor, high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same shape as Tensor :attr:`input` filled with + random integers generated uniformly between :attr:`low` (inclusive) and + :attr:`high` (exclusive). + + .. note: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... +@overload +def randint_like(input: Tensor, low: Union[_int, SymInt], high: Union[_int, SymInt], *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randint_like(input, low=0, high, \*, dtype=None, layout=torch.strided, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same shape as Tensor :attr:`input` filled with + random integers generated uniformly between :attr:`low` (inclusive) and + :attr:`high` (exclusive). + + .. note: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. 
Default: ``torch.preserve_format``. + """ + ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(*size: _int, generator: Optional[Generator], names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. 
sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. 
+ + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(*size: _int, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. 
Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... 
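The complex-dtype behaviour described in the ``randn`` docstring (real and imaginary parts each drawn from N(0, 1/2)) can be checked empirically. A small editorial sketch, assuming a standard ``torch`` installation; the printed values are approximate::

    import torch

    torch.manual_seed(0)
    z = torch.randn(100_000, dtype=torch.cfloat)  # i.i.d. samples from CN(0, 1)

    # Each component has variance ~0.5, so |z|^2 has mean ~1.
    print(z.real.var().item())           # approximately 0.5
    print(z.imag.var().item())           # approximately 0.5
    print(z.abs().pow(2).mean().item())  # approximately 1.0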
+@overload +def randn(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(size: Sequence[Union[_int, SymInt]], *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. 
math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +@overload +def randn(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + + Returns a tensor filled with random numbers from a normal distribution + with mean `0` and variance `1` (also called the standard normal + distribution). + + .. math:: + \text{out}_{i} \sim \mathcal{N}(0, 1) + + For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and + unit variance as + + .. math:: + \text{out}_{i} \sim \mathcal{CN}(0, 1) + + This is equivalent to separately sampling the real :math:`(\operatorname{Re})` and imaginary + :math:`(\operatorname{Im})` part of :math:`\text{out}_i` as + + .. math:: + \operatorname{Re}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}),\quad + \operatorname{Im}(\text{out}_{i}) \sim \mathcal{N}(0, \frac{1}{2}) + + The shape of the tensor is defined by the variable argument :attr:`size`. + + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + + .. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution + """ + ... +def randn_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor with the same size as :attr:`input` that is filled with + random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the + sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to + ``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + """ + ... +@overload +def randperm(n: Union[_int, SymInt], *, generator: Optional[Generator], out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randperm(n, *, generator=None, out=None, dtype=torch.int64,layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a random permutation of integers from ``0`` to ``n - 1``. + + Args: + n (int): the upper bound (exclusive) + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: ``torch.int64``. 
+ layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randperm(4) + tensor([2, 1, 0, 3]) + """ + ... +@overload +def randperm(n: Union[_int, SymInt], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + randperm(n, *, generator=None, out=None, dtype=torch.int64,layout=torch.strided, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Returns a random permutation of integers from ``0`` to ``n - 1``. + + Args: + n (int): the upper bound (exclusive) + + Keyword args: + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: ``torch.int64``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + Example:: + + >>> torch.randperm(4) + tensor([2, 1, 0, 3]) + """ + ... +def range(start: Number, end: Number, step: Number = 1, *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1` + with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is + the gap between two values in the tensor. + + .. math:: + \text{out}_{i+1} = \text{out}_i + \text{step}. + + .. warning:: + This function is deprecated and will be removed in a future release because its behavior is inconsistent with + Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end). + + Args: + start (float): the starting value for the set of points. Default: ``0``. + end (float): the ending value for the set of points + step (float): the gap between each pair of adjacent points. Default: ``1``. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.range(1, 4) + tensor([ 1., 2., 3., 4.]) + >>> torch.range(1, 4, 0.5) + tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000]) + """ + ... +def ravel(input: Tensor) -> Tensor: + r""" + ravel(input) -> Tensor + + Return a contiguous flattened tensor. A copy is made only if needed. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.ravel(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + """ + ... +def real(input: Tensor) -> Tensor: + r""" + real(input) -> Tensor + + Returns a new tensor containing real values of the :attr:`self` tensor. + The returned tensor and :attr:`self` share the same underlying storage. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) + >>> x.real + tensor([ 0.3100, -0.5445, -1.6492, -0.0638]) + """ + ... +def reciprocal(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + reciprocal(input, *, out=None) -> Tensor + + Returns a new tensor with the reciprocal of the elements of :attr:`input` + + .. math:: + \text{out}_{i} = \frac{1}{\text{input}_{i}} + + .. note:: + Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral + inputs to reciprocal are automatically :ref:`promoted ` to + the default scalar type. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.4595, -2.1219, -1.4314, 0.7298]) + >>> torch.reciprocal(a) + tensor([-2.1763, -0.4713, -0.6986, 1.3702]) + """ + ... +def reciprocal_(input: Tensor) -> Tensor: ... +def relu(input: Tensor) -> Tensor: ... +def relu_(input: Tensor) -> Tensor: ... +@overload +def remainder(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + remainder(input, other, *, out=None) -> Tensor + + Computes + `Python's modulus operation `_ + entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value + is less than that of :attr:`other`. + + It may also be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + Complex inputs are not supported. 
In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + See :func:`torch.fmod` for how division by zero is handled. + + .. seealso:: + + :func:`torch.fmod` which implements C++'s `std::fmod `_. + This one is defined in terms of division rounding towards zero. + + Args: + input (Tensor or Scalar): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([ 1., 0., 1., 1., 0., 1.]) + >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ]) + """ + ... +@overload +def remainder(self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + remainder(input, other, *, out=None) -> Tensor + + Computes + `Python's modulus operation `_ + entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value + is less than that of :attr:`other`. + + It may also be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + See :func:`torch.fmod` for how division by zero is handled. + + .. seealso:: + + :func:`torch.fmod` which implements C++'s `std::fmod `_. + This one is defined in terms of division rounding towards zero. + + Args: + input (Tensor or Scalar): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([ 1., 0., 1., 1., 0., 1.]) + >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ]) + """ + ... +@overload +def remainder(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + remainder(input, other, *, out=None) -> Tensor + + Computes + `Python's modulus operation `_ + entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value + is less than that of :attr:`other`. + + It may also be defined in terms of :func:`torch.div` as + + .. code:: python + + torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer and float inputs. + + .. note:: + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + See :func:`torch.fmod` for how division by zero is handled. + + .. seealso:: + + :func:`torch.fmod` which implements C++'s `std::fmod `_. + This one is defined in terms of division rounding towards zero. + + Args: + input (Tensor or Scalar): the dividend + other (Tensor or Scalar): the divisor + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([ 1., 0., 1., 1., 0., 1.]) + >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ]) + """ + ... 
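The sign convention that distinguishes ``remainder`` from ``fmod`` is easiest to see side by side. A short editorial sketch, assuming a standard ``torch`` installation::

    import torch

    t = torch.tensor([-3.0, -2.0, -1.0, 1.0, 2.0, 3.0])

    # remainder follows Python's % operator: the result takes the sign of the divisor.
    print(torch.remainder(t, 2))  # tensor([1., 0., 1., 1., 0., 1.])

    # fmod follows C++ std::fmod: the result takes the sign of the dividend.
    print(torch.fmod(t, 2))       # tensor([-1., -0., -1., 1., 0., 1.])

    # remainder satisfies the identity quoted in the docstring above.
    assert torch.equal(torch.remainder(t, 2), t - t.div(2, rounding_mode="floor") * 2)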
+def renorm(input: Tensor, p: Union[Number, _complex], dim: _int, maxnorm: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + renorm(input, p, dim, maxnorm, *, out=None) -> Tensor + + Returns a tensor where each sub-tensor of :attr:`input` along dimension + :attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower + than the value :attr:`maxnorm` + + .. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged + + Args: + input (Tensor): the input tensor. + p (float): the power for the norm computation + dim (int): the dimension to slice over to get the sub-tensors + maxnorm (float): the maximum norm to keep each sub-tensor under + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> x = torch.ones(3, 3) + >>> x[1].fill_(2) + tensor([ 2., 2., 2.]) + >>> x[2].fill_(3) + tensor([ 3., 3., 3.]) + >>> x + tensor([[ 1., 1., 1.], + [ 2., 2., 2.], + [ 3., 3., 3.]]) + >>> torch.renorm(x, 1, 0, 5) + tensor([[ 1.0000, 1.0000, 1.0000], + [ 1.6667, 1.6667, 1.6667], + [ 1.6667, 1.6667, 1.6667]]) + """ + ... +@overload +def repeat_interleave(input: Tensor, repeats: Tensor, dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor + + Repeat elements of a tensor. + + .. warning:: + + This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``. + + Args: + input (Tensor): the input tensor. + repeats (Tensor or int): The number of repetitions for each element. + repeats is broadcasted to fit the shape of the given axis. + dim (int, optional): The dimension along which to repeat values. + By default, use the flattened input array, and return a flat output + array. + + Keyword args: + output_size (int, optional): Total output size for the given axis + ( e.g. sum of repeats). If given, it will avoid stream synchronization + needed to calculate output shape of the tensor. + + Returns: + Tensor: Repeated tensor which has the same shape as input, except along the given axis. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat_interleave(2) + tensor([1, 1, 2, 2, 3, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.repeat_interleave(y, 2) + tensor([1, 1, 2, 2, 3, 3, 4, 4]) + >>> torch.repeat_interleave(y, 3, dim=1) + tensor([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0) + tensor([[1, 2], + [3, 4], + [3, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3) + tensor([[1, 2], + [3, 4], + [3, 4]]) + + If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be + `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times, + `1` appears `n2` times, `2` appears `n3` times, etc. + + .. function:: repeat_interleave(repeats, *) -> Tensor + :noindex: + + Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc. + + Args: + repeats (Tensor): The number of repetitions for each element. + + Returns: + Tensor: Repeated tensor of size `sum(repeats)`. + + Example:: + + >>> torch.repeat_interleave(torch.tensor([1, 2, 3])) + tensor([0, 1, 1, 2, 2, 2]) + """ + ... +@overload +def repeat_interleave(repeats: Tensor, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor + + Repeat elements of a tensor. + + .. 
warning:: + + This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``. + + Args: + input (Tensor): the input tensor. + repeats (Tensor or int): The number of repetitions for each element. + repeats is broadcasted to fit the shape of the given axis. + dim (int, optional): The dimension along which to repeat values. + By default, use the flattened input array, and return a flat output + array. + + Keyword args: + output_size (int, optional): Total output size for the given axis + ( e.g. sum of repeats). If given, it will avoid stream synchronization + needed to calculate output shape of the tensor. + + Returns: + Tensor: Repeated tensor which has the same shape as input, except along the given axis. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat_interleave(2) + tensor([1, 1, 2, 2, 3, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.repeat_interleave(y, 2) + tensor([1, 1, 2, 2, 3, 3, 4, 4]) + >>> torch.repeat_interleave(y, 3, dim=1) + tensor([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0) + tensor([[1, 2], + [3, 4], + [3, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3) + tensor([[1, 2], + [3, 4], + [3, 4]]) + + If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be + `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times, + `1` appears `n2` times, `2` appears `n3` times, etc. + + .. function:: repeat_interleave(repeats, *) -> Tensor + :noindex: + + Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc. + + Args: + repeats (Tensor): The number of repetitions for each element. + + Returns: + Tensor: Repeated tensor of size `sum(repeats)`. + + Example:: + + >>> torch.repeat_interleave(torch.tensor([1, 2, 3])) + tensor([0, 1, 1, 2, 2, 2]) + """ + ... +@overload +def repeat_interleave(input: Tensor, repeats: Union[_int, SymInt], dim: Optional[_int] = None, *, output_size: Optional[Union[_int, SymInt]] = None) -> Tensor: + r""" + repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor + + Repeat elements of a tensor. + + .. warning:: + + This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``. + + Args: + input (Tensor): the input tensor. + repeats (Tensor or int): The number of repetitions for each element. + repeats is broadcasted to fit the shape of the given axis. + dim (int, optional): The dimension along which to repeat values. + By default, use the flattened input array, and return a flat output + array. + + Keyword args: + output_size (int, optional): Total output size for the given axis + ( e.g. sum of repeats). If given, it will avoid stream synchronization + needed to calculate output shape of the tensor. + + Returns: + Tensor: Repeated tensor which has the same shape as input, except along the given axis. 
+ + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat_interleave(2) + tensor([1, 1, 2, 2, 3, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.repeat_interleave(y, 2) + tensor([1, 1, 2, 2, 3, 3, 4, 4]) + >>> torch.repeat_interleave(y, 3, dim=1) + tensor([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0) + tensor([[1, 2], + [3, 4], + [3, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3) + tensor([[1, 2], + [3, 4], + [3, 4]]) + + If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be + `tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times, + `1` appears `n2` times, `2` appears `n3` times, etc. + + .. function:: repeat_interleave(repeats, *) -> Tensor + :noindex: + + Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc. + + Args: + repeats (Tensor): The number of repetitions for each element. + + Returns: + Tensor: Repeated tensor of size `sum(repeats)`. + + Example:: + + >>> torch.repeat_interleave(torch.tensor([1, 2, 3])) + tensor([0, 1, 1, 2, 2, 2]) + """ + ... +def reshape(input: Tensor, shape: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + reshape(input, shape) -> Tensor + + Returns a tensor with the same data and number of elements as :attr:`input`, + but with the specified shape. When possible, the returned tensor will be a view + of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs + with compatible strides can be reshaped without copying, but you should not + depend on the copying vs. viewing behavior. + + See :meth:`torch.Tensor.view` on when it is possible to return a view. + + A single dimension may be -1, in which case it's inferred from the remaining + dimensions and the number of elements in :attr:`input`. + + Args: + input (Tensor): the tensor to be reshaped + shape (tuple of int): the new shape + + Example:: + + >>> a = torch.arange(4.) + >>> torch.reshape(a, (2, 2)) + tensor([[ 0., 1.], + [ 2., 3.]]) + >>> b = torch.tensor([[0, 1], [2, 3]]) + >>> torch.reshape(b, (-1,)) + tensor([ 0, 1, 2, 3]) + """ + ... +def resize_as_(input: Tensor, the_template: Tensor, *, memory_format: Optional[memory_format] = None) -> Tensor: ... +def resize_as_sparse_(input: Tensor, the_template: Tensor) -> Tensor: ... +def resolve_conj(input: Tensor) -> Tensor: + r""" + resolve_conj(input) -> Tensor + + Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`, + else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> y = x.conj() + >>> y.is_conj() + True + >>> z = y.resolve_conj() + >>> z + tensor([-1 - 1j, -2 - 2j, 3 + 3j]) + >>> z.is_conj() + False + """ + ... +def resolve_neg(input: Tensor) -> Tensor: + r""" + resolve_neg(input) -> Tensor + + Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`, + else returns :attr:`input`. The output tensor will always have its negative bit set to `False`. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> y = x.conj() + >>> z = y.imag + >>> z.is_neg() + True + >>> out = z.resolve_neg() + >>> out + tensor([-1., -2., 3.]) + >>> out.is_neg() + False + """ + ... 
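The view-versus-copy behaviour mentioned in the ``reshape`` docstring can be observed directly, although (as the docstring warns) it should not be relied upon. A small editorial sketch, assuming a standard ``torch`` installation::

    import torch

    a = torch.arange(6.)
    v = torch.reshape(a, (2, 3))   # contiguous input: the result is a view
    v[0, 0] = 100.0
    print(a[0])                    # tensor(100.) -- storage is shared

    t = torch.arange(6.).view(2, 3).t()  # transposing makes the tensor non-contiguous
    f = torch.reshape(t, (6,))           # here reshape has to copy
    f[0] = -1.0
    print(t[0, 0])                       # tensor(0.) -- unchanged, no storage sharing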
+@overload +def result_type(tensor: Tensor, other: Tensor) -> _dtype: + r""" + result_type(tensor1, tensor2) -> dtype + + Returns the :class:`torch.dtype` that would result from performing an arithmetic + operation on the provided input tensors. See type promotion :ref:`documentation ` + for more information on the type promotion logic. + + Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + + Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 + """ + ... +@overload +def result_type(scalar: Union[Number, _complex], tensor: Tensor) -> _dtype: + r""" + result_type(tensor1, tensor2) -> dtype + + Returns the :class:`torch.dtype` that would result from performing an arithmetic + operation on the provided input tensors. See type promotion :ref:`documentation ` + for more information on the type promotion logic. + + Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + + Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 + """ + ... +@overload +def result_type(tensor: Tensor, other: Union[Number, _complex]) -> _dtype: + r""" + result_type(tensor1, tensor2) -> dtype + + Returns the :class:`torch.dtype` that would result from performing an arithmetic + operation on the provided input tensors. See type promotion :ref:`documentation ` + for more information on the type promotion logic. + + Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + + Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 + """ + ... +@overload +def result_type(scalar1: Union[Number, _complex], scalar2: Union[Number, _complex]) -> _dtype: + r""" + result_type(tensor1, tensor2) -> dtype + + Returns the :class:`torch.dtype` that would result from performing an arithmetic + operation on the provided input tensors. See type promotion :ref:`documentation ` + for more information on the type promotion logic. + + Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + + Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 + """ + ... +@overload +def rnn_relu(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def rnn_relu(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def rnn_relu_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ... 
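``result_type`` only reports the dtype that type promotion would produce; nothing is computed. A brief editorial sketch (the promoted dtypes follow PyTorch's type-promotion rules and assume a standard installation)::

    import torch

    print(torch.result_type(torch.tensor([1, 2], dtype=torch.int32), 1.0))
    # torch.float32 -- an integer tensor combined with a Python float promotes to the default float dtype

    print(torch.result_type(torch.tensor([1], dtype=torch.uint8),
                            torch.tensor([1], dtype=torch.int8)))
    # torch.int16 -- the smallest dtype that can represent both operands

    print(torch.result_type(2, 3.5))
    # torch.float32 -- the scalar/scalar overload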
+@overload +def rnn_tanh(data: Tensor, batch_sizes: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool) -> Tuple[Tensor, Tensor]: ... +@overload +def rnn_tanh(input: Tensor, hx: Tensor, params: Union[Tuple[Tensor, ...], List[Tensor]], has_biases: _bool, num_layers: _int, dropout: _float, train: _bool, bidirectional: _bool, batch_first: _bool) -> Tuple[Tensor, Tensor]: ... +def rnn_tanh_cell(input: Tensor, hx: Tensor, w_ih: Tensor, w_hh: Tensor, b_ih: Optional[Tensor] = None, b_hh: Optional[Tensor] = None) -> Tensor: ... +def roll(input: Tensor, shifts: Union[Union[_int, SymInt], Sequence[Union[_int, SymInt]]], dims: Union[_int, _size] = ()) -> Tensor: + r""" + roll(input, shifts, dims=None) -> Tensor + + Roll the tensor :attr:`input` along the given dimension(s). Elements that are + shifted beyond the last position are re-introduced at the first position. If + :attr:`dims` is `None`, the tensor will be flattened before rolling and then + restored to the original shape. + + Args: + input (Tensor): the input tensor. + shifts (int or tuple of ints): The number of places by which the elements + of the tensor are shifted. If shifts is a tuple, dims must be a tuple of + the same size, and each dimension will be rolled by the corresponding + value + dims (int or tuple of ints): Axis along which to roll + + Example:: + + >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2) + >>> x + tensor([[1, 2], + [3, 4], + [5, 6], + [7, 8]]) + >>> torch.roll(x, 1) + tensor([[8, 1], + [2, 3], + [4, 5], + [6, 7]]) + >>> torch.roll(x, 1, 0) + tensor([[7, 8], + [1, 2], + [3, 4], + [5, 6]]) + >>> torch.roll(x, -1, 0) + tensor([[3, 4], + [5, 6], + [7, 8], + [1, 2]]) + >>> torch.roll(x, shifts=(2, 1), dims=(0, 1)) + tensor([[6, 5], + [8, 7], + [2, 1], + [4, 3]]) + """ + ... +def rot90(input: Tensor, k: _int = 1, dims: _size = (0,1)) -> Tensor: + r""" + rot90(input, k=1, dims=[0,1]) -> Tensor + + Rotate an n-D tensor by 90 degrees in the plane specified by dims axis. + Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0. + + Args: + input (Tensor): the input tensor. + k (int): number of times to rotate. Default value is 1 + dims (a list or tuple): axis to rotate. Default value is [0, 1] + + Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.rot90(x, 1, [0, 1]) + tensor([[1, 3], + [0, 2]]) + + >>> x = torch.arange(8).view(2, 2, 2) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.rot90(x, 1, [1, 2]) + tensor([[[1, 3], + [0, 2]], + + [[5, 7], + [4, 6]]]) + """ + ... +@overload +def round(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + round(input, *, decimals=0, out=None) -> Tensor + + Rounds elements of :attr:`input` to the nearest integer. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + The return type of output is same as that of input's dtype. + + .. note:: + This function implements the "round half to even" to + break ties when a number is equidistant from two + integers (e.g. `round(2.5)` is 2). + + When the :attr:\`decimals\` argument is specified the + algorithm used is similar to NumPy's `around`. This + algorithm is fast but inexact and it can easily + overflow for low precision dtypes. + Eg. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`. + + .. 
seealso:: + :func:`torch.ceil`, which rounds up. + :func:`torch.floor`, which rounds down. + :func:`torch.trunc`, which rounds towards zero. + + Args: + input (Tensor): the input tensor. + decimals (int): Number of decimal places to round to (default: 0). + If decimals is negative, it specifies the number of positions + to the left of the decimal point. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7))) + tensor([ 5., -2., 9., -8.]) + + >>> # Values equidistant from two integers are rounded towards the + >>> # the nearest even value (zero is treated as even) + >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5])) + tensor([-0., 0., 2., 2.]) + + >>> # A positive decimals argument rounds to the to that decimal place + >>> torch.round(torch.tensor([0.1234567]), decimals=3) + tensor([0.1230]) + + >>> # A negative decimals argument rounds to the left of the decimal + >>> torch.round(torch.tensor([1200.1234567]), decimals=-3) + tensor([1000.]) + """ + ... +@overload +def round(input: Tensor, *, decimals: _int, out: Optional[Tensor] = None) -> Tensor: + r""" + round(input, *, decimals=0, out=None) -> Tensor + + Rounds elements of :attr:`input` to the nearest integer. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + The return type of output is same as that of input's dtype. + + .. note:: + This function implements the "round half to even" to + break ties when a number is equidistant from two + integers (e.g. `round(2.5)` is 2). + + When the :attr:\`decimals\` argument is specified the + algorithm used is similar to NumPy's `around`. This + algorithm is fast but inexact and it can easily + overflow for low precision dtypes. + Eg. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`. + + .. seealso:: + :func:`torch.ceil`, which rounds up. + :func:`torch.floor`, which rounds down. + :func:`torch.trunc`, which rounds towards zero. + + Args: + input (Tensor): the input tensor. + decimals (int): Number of decimal places to round to (default: 0). + If decimals is negative, it specifies the number of positions + to the left of the decimal point. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7))) + tensor([ 5., -2., 9., -8.]) + + >>> # Values equidistant from two integers are rounded towards the + >>> # the nearest even value (zero is treated as even) + >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5])) + tensor([-0., 0., 2., 2.]) + + >>> # A positive decimals argument rounds to the to that decimal place + >>> torch.round(torch.tensor([0.1234567]), decimals=3) + tensor([0.1230]) + + >>> # A negative decimals argument rounds to the left of the decimal + >>> torch.round(torch.tensor([1200.1234567]), decimals=-3) + tensor([1000.]) + """ + ... +@overload +def round_(input: Tensor) -> Tensor: ... +@overload +def round_(input: Tensor, *, decimals: _int) -> Tensor: ... +def row_indices_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: ... +def row_stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + row_stack(tensors, *, out=None) -> Tensor + + Alias of :func:`torch.vstack`. + """ + ... +def rrelu(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ... 
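The tie-breaking and ``decimals`` behaviour described in the ``round`` docstrings above, including the low-precision overflow caveat, in one runnable editorial sketch (assumes a standard ``torch`` installation)::

    import torch

    # Ties are rounded to the nearest even value ("round half to even").
    print(torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5])))      # tensor([-0., 0., 2., 2.])

    # decimals selects the decimal place; negative values round left of the point.
    print(torch.round(torch.tensor([0.1234567]), decimals=3))    # tensor([0.1230])
    print(torch.round(torch.tensor([1200.1234]), decimals=-3))   # tensor([1000.])

    # As the note warns, the decimals path can overflow low-precision dtypes.
    print(torch.round(torch.tensor([10000.0], dtype=torch.float16), decimals=3))
    # tensor([inf], dtype=torch.float16)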
+def rrelu_(input: Tensor, lower: Union[Number, _complex] = 0.125, upper: Union[Number, _complex] = 0.3333333333333333, training: _bool = False, generator: Optional[Generator] = None) -> Tensor: ... +def rsqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + rsqrt(input, *, out=None) -> Tensor + + Returns a new tensor with the reciprocal of the square-root of each of + the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}} + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.0370, 0.2970, 1.5420, -0.9105]) + >>> torch.rsqrt(a) + tensor([ nan, 1.8351, 0.8053, nan]) + """ + ... +def rsqrt_(input: Tensor) -> Tensor: ... +@overload +def rsub(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1) -> Tensor: ... +@overload +def rsub(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: ... +def saddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Number = 1, alpha: Number = 1, out: Optional[Tensor] = None) -> Tensor: ... +def scalar_tensor(s: Union[Number, _complex], *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, reduce: str, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, reduce: str, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: _int, index: Tensor, value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, value: Union[Number, _complex]) -> Tensor: + r""" + scatter(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_` + """ + ... +@overload +def scatter_add(input: Tensor, dim: _int, index: Tensor, src: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + scatter_add(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_add_` + """ + ... +@overload +def scatter_add(input: Tensor, dim: Union[str, ellipsis, None], index: Tensor, src: Tensor) -> Tensor: + r""" + scatter_add(input, dim, index, src) -> Tensor + + Out-of-place version of :meth:`torch.Tensor.scatter_add_` + """ + ... 
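+# The scatter/scatter_add stubs above only state "out-of-place version of
+# Tensor.scatter_ / Tensor.scatter_add_"; a minimal doctest-style sketch of the
+# out-of-place form (assumes `import torch`):
+#
+#   >>> src = torch.arange(1, 11).reshape((2, 5))
+#   >>> index = torch.tensor([[0, 1, 2, 0]])
+#   >>> torch.scatter(torch.zeros(3, 5, dtype=src.dtype), 0, index, src)
+#   tensor([[1, 0, 0, 4, 0],
+#           [0, 2, 0, 0, 0],
+#           [0, 0, 3, 0, 0]])
+#
+#   # scatter_add accumulates into the destination instead of overwriting:
+#   >>> torch.scatter_add(torch.ones(3, 5, dtype=src.dtype), 0, index, src)
+#   tensor([[2, 1, 1, 5, 1],
+#           [1, 3, 1, 1, 1],
+#           [1, 1, 4, 1, 1]])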
+def scatter_reduce(input: Tensor, dim: _int, index: Tensor, src: Tensor, reduce: str, *, include_self: _bool = True, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor
+
+    Out-of-place version of :meth:`torch.Tensor.scatter_reduce_`
+    """
+    ...
+@overload
+def searchsorted(sorted_sequence: Tensor, input: Tensor, *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor:
+    r"""
+    searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor
+
+    Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the
+    corresponding values in :attr:`values` were inserted before the indices, when sorted, the order
+    of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved.
+    Return a new tensor with the same size as :attr:`values`. More formally,
+    the returned index satisfies the following rules:
+
+    .. list-table::
+       :widths: 12 10 78
+       :header-rows: 1
+
+       * - :attr:`sorted_sequence`
+         - :attr:`right`
+         - *returned index satisfies*
+       * - 1-D
+         - False
+         - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]``
+       * - 1-D
+         - True
+         - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]``
+       * - N-D
+         - False
+         - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]``
+       * - N-D
+         - True
+         - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]``
+
+    Args:
+        sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost*
+                                  dimension unless :attr:`sorter` is provided, in which case the sequence does not
+                                  need to be sorted
+        values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
+
+    Keyword args:
+        out_int32 (bool, optional): indicates the output data type. torch.int32 if True, torch.int64 otherwise.
+                                    Default value is False, i.e. the default output data type is torch.int64.
+        right (bool, optional): if False, return the first suitable location that is found. If True, return the
+                                last such index. If no suitable index is found, return 0 for a non-numerical value
+                                (e.g. nan, inf) or the size of the *innermost* dimension within :attr:`sorted_sequence`
+                                (one past the last index of the *innermost* dimension). In other words, if False,
+                                gets the lower bound index for each value in :attr:`values` on the corresponding
+                                *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper
+                                bound index instead. Default value is False. :attr:`side` does the same and is
+                                preferred. It will error if :attr:`side` is set to "left" while this is True.
+        side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right`
+                              and "right" corresponds to True for :attr:`right`. It will error if this is set to
+                              "left" while :attr:`right` is True. Default value is None.
+        out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided.
+ sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted + :attr:`sorted_sequence` containing a sequence of indices that sort it in the + ascending order on the innermost dimension + + + Example:: + + >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]]) + >>> sorted_sequence + tensor([[ 1, 3, 5, 7, 9], + [ 2, 4, 6, 8, 10]]) + >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> values + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.searchsorted(sorted_sequence, values) + tensor([[1, 3, 4], + [1, 2, 4]]) + >>> torch.searchsorted(sorted_sequence, values, side='right') + tensor([[2, 3, 5], + [1, 3, 4]]) + + >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9]) + >>> sorted_sequence_1d + tensor([1, 3, 5, 7, 9]) + >>> torch.searchsorted(sorted_sequence_1d, values) + tensor([[1, 3, 4], + [1, 3, 4]]) + """ + ... +@overload +def searchsorted(sorted_sequence: Tensor, self: Union[Number, _complex], *, out_int32: _bool = False, right: _bool = False, side: Optional[str] = None, sorter: Optional[Tensor] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor + + Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the + corresponding values in :attr:`values` were inserted before the indices, when sorted, the order + of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved. + Return a new tensor with the same size as :attr:`values`. More formally, + the returned index satisfies the following rules: + + .. list-table:: + :widths: 12 10 78 + :header-rows: 1 + + * - :attr:`sorted_sequence` + - :attr:`right` + - *returned index satisfies* + * - 1-D + - False + - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]`` + * - 1-D + - True + - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]`` + * - N-D + - False + - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]`` + * - N-D + - True + - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]`` + + Args: + sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost* + dimension unless :attr:`sorter` is provided, in which case the sequence does not + need to be sorted + values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + + Keyword args: + out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of *innermost* dimension within :attr:`sorted_sequence` + (one pass the last index of the *innermost* dimension). In other words, if False, + gets the lower bound index for each value in :attr:`values` on the corresponding + *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper + bound index instead. Default value is False. :attr:`side` does the same and is + preferred. It will error if :attr:`side` is set to "left" while this is True. + side (str, optional): the same as :attr:`right` but preferred. 
"left" corresponds to False for :attr:`right` + and "right" corresponds to True for :attr:`right`. It will error if this is set to + "left" while :attr:`right` is True. Default value is None. + out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided. + sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted + :attr:`sorted_sequence` containing a sequence of indices that sort it in the + ascending order on the innermost dimension + + + Example:: + + >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]]) + >>> sorted_sequence + tensor([[ 1, 3, 5, 7, 9], + [ 2, 4, 6, 8, 10]]) + >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> values + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.searchsorted(sorted_sequence, values) + tensor([[1, 3, 4], + [1, 2, 4]]) + >>> torch.searchsorted(sorted_sequence, values, side='right') + tensor([[2, 3, 5], + [1, 3, 4]]) + + >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9]) + >>> sorted_sequence_1d + tensor([1, 3, 5, 7, 9]) + >>> torch.searchsorted(sorted_sequence_1d, values) + tensor([[1, 3, 4], + [1, 3, 4]]) + """ + ... +def segment_reduce(data: Tensor, reduce: str, *, lengths: Optional[Tensor] = None, indices: Optional[Tensor] = None, offsets: Optional[Tensor] = None, axis: _int = 0, unsafe: _bool = False, initial: Optional[Union[Number, _complex]] = None) -> Tensor: ... +@overload +def select(input: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: + r""" + select(input, dim, index) -> Tensor + + Slices the :attr:`input` tensor along the selected dimension at the given index. + This function returns a view of the original tensor with the given dimension removed. + + .. note:: If :attr:`input` is a sparse tensor and returning a view of + the tensor is not possible, a RuntimeError exception is + raised. In this is the case, consider using + :func:`torch.select_copy` function. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to slice + index (int): the index to select with + + .. note:: + + :meth:`select` is equivalent to slicing. For example, + ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and + ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``. + """ + ... +@overload +def select(input: Tensor, dim: Union[str, ellipsis, None], index: _int) -> Tensor: + r""" + select(input, dim, index) -> Tensor + + Slices the :attr:`input` tensor along the selected dimension at the given index. + This function returns a view of the original tensor with the given dimension removed. + + .. note:: If :attr:`input` is a sparse tensor and returning a view of + the tensor is not possible, a RuntimeError exception is + raised. In this is the case, consider using + :func:`torch.select_copy` function. + + Args: + input (Tensor): the input tensor. + dim (int): the dimension to slice + index (int): the index to select with + + .. note:: + + :meth:`select` is equivalent to slicing. For example, + ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and + ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``. + """ + ... +def select_copy(input: Tensor, dim: _int, index: Union[_int, SymInt], *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.select`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... 
+def select_scatter(input: Tensor, src: Tensor, dim: _int, index: Union[_int, SymInt]) -> Tensor: + r""" + select_scatter(input, src, dim, index) -> Tensor + + Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index. + This function returns a tensor with fresh storage; it does not create a view. + + + Args: + input (Tensor): the input tensor. + src (Tensor): The tensor to embed into :attr:`input` + dim (int): the dimension to insert the slice into. + index (int): the index to select with + + .. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + ``torch.select(input, dim, index)`` + + Example:: + + >>> a = torch.zeros(2, 2) + >>> b = torch.ones(2) + >>> a.select_scatter(b, 0, 0) + tensor([[1., 1.], + [0., 0.]]) + """ + ... +def selu(input: Tensor) -> Tensor: ... +def selu_(input: Tensor) -> Tensor: ... +def set_flush_denormal(mode: _bool) -> _bool: + r""" + set_flush_denormal(mode) -> bool + + Disables denormal floating numbers on CPU. + + Returns ``True`` if your system supports flushing denormal numbers and it + successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal` + is supported on x86 architectures supporting SSE3 and AArch64 architecture. + + Args: + mode (bool): Controls whether to enable flush denormal mode or not + + Example:: + + >>> torch.set_flush_denormal(True) + True + >>> torch.tensor([1e-323], dtype=torch.float64) + tensor([ 0.], dtype=torch.float64) + >>> torch.set_flush_denormal(False) + True + >>> torch.tensor([1e-323], dtype=torch.float64) + tensor(9.88131e-324 * + [ 1.0000], dtype=torch.float64) + """ + ... +def set_num_interop_threads(num: _int) -> None: + r""" + set_num_interop_threads(int) + + Sets the number of threads used for interop parallelism + (e.g. in JIT interpreter) on CPU. + + .. warning:: + Can only be called once and before any inter-op parallel work + is started (e.g. JIT execution). + """ + ... +def set_num_threads(num: _int) -> None: + r""" + set_num_threads(int) + + Sets the number of threads used for intraop parallelism on CPU. + + .. warning:: + To ensure that the correct number of threads is used, set_num_threads + must be called before running eager, JIT or autograd code. + """ + ... +def sgn(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sgn(input, *, out=None) -> Tensor + + This function is an extension of torch.sign() to complex tensors. + It computes a new tensor whose elements have + the same angles as the corresponding elements of :attr:`input` and + absolute values (i.e. magnitudes) of one for complex tensors and + is equivalent to torch.sign() for non-complex tensors. + + .. math:: + \text{out}_{i} = \begin{cases} + 0 & |\text{{input}}_i| == 0 \\ + \frac{{\text{{input}}_i}}{|{\text{{input}}_i}|} & \text{otherwise} + \end{cases} + + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j]) + >>> t.sgn() + tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j]) + """ + ... +def sigmoid(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sigmoid(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.expit`. + """ + ... +def sigmoid_(input: Tensor) -> Tensor: ... 
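+# A minimal usage sketch for the set_num_threads / set_num_interop_threads stubs
+# declared above (assumes `import torch` in a freshly started process, since the
+# inter-op thread count can only be set before any parallel work begins):
+#
+#   >>> torch.set_num_interop_threads(2)   # inter-op parallelism (e.g. JIT interpreter)
+#   >>> torch.get_num_interop_threads()
+#   2
+#   >>> torch.set_num_threads(4)           # intra-op parallelism (e.g. within a matmul)
+#   >>> torch.get_num_threads()
+#   4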
+def sign(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sign(input, *, out=None) -> Tensor + + Returns a new tensor with the signs of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \operatorname{sgn}(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([0.7, -1.2, 0., 2.3]) + >>> a + tensor([ 0.7000, -1.2000, 0.0000, 2.3000]) + >>> torch.sign(a) + tensor([ 1., -1., 0., 1.]) + """ + ... +def signbit(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + signbit(input, *, out=None) -> Tensor + + Tests if each element of :attr:`input` has its sign bit set or not. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([0.7, -1.2, 0., 2.3]) + >>> torch.signbit(a) + tensor([ False, True, False, False]) + >>> a = torch.tensor([-0.0, 0.0]) + >>> torch.signbit(a) + tensor([ True, False]) + + .. note:: + signbit handles signed zeros, so negative zero (-0) returns True. + """ + ... +def sin(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sin(input, *, out=None) -> Tensor + + Returns a new tensor with the sine of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sin(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5461, 0.1347, -2.7266, -0.2746]) + >>> torch.sin(a) + tensor([-0.5194, 0.1343, -0.4032, -0.2711]) + """ + ... +def sin_(input: Tensor) -> Tensor: ... +def sinc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sinc(input, *, out=None) -> Tensor + + Alias for :func:`torch.special.sinc`. + """ + ... +def sinc_(input: Tensor) -> Tensor: ... +def sinh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sinh(input, *, out=None) -> Tensor + + Returns a new tensor with the hyperbolic sine of the elements of + :attr:`input`. + + .. math:: + \text{out}_{i} = \sinh(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.5380, -0.8632, -0.1265, 0.9399]) + >>> torch.sinh(a) + tensor([ 0.5644, -0.9744, -0.1268, 1.0845]) + + .. note:: + When :attr:`input` is on the CPU, the implementation of torch.sinh may use + the Sleef library, which rounds very large results to infinity or negative + infinity. See `here `_ for details. + """ + ... +def sinh_(input: Tensor) -> Tensor: ... +def slice_copy(input: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.slice`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def slice_inverse(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1) -> Tensor: ... 
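+# The slice_copy stub above notes it performs the same operation as slicing but with
+# freshly created output; a minimal sketch of that difference, assuming `import torch`
+# and torch.slice_copy exposed with the signature declared in this stub:
+#
+#   >>> x = torch.arange(10)
+#   >>> view = x[2:5]                        # a view; shares storage with x
+#   >>> copy = torch.slice_copy(x, 0, 2, 5)  # same values, freshly allocated
+#   >>> copy
+#   tensor([2, 3, 4])
+#   >>> x[2] = 100
+#   >>> view[0].item(), copy[0].item()       # the view reflects the write, the copy does not
+#   (100, 2)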
+def slice_scatter(input: Tensor, src: Tensor, dim: _int = 0, start: Optional[Union[_int, SymInt]] = None, end: Optional[Union[_int, SymInt]] = None, step: Union[_int, SymInt] = 1, *, out: Optional[Tensor] = None) -> Tensor: + r""" + slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor + + Embeds the values of the :attr:`src` tensor into :attr:`input` at the given + dimension. + This function returns a tensor with fresh storage; it does not create a view. + + + Args: + input (Tensor): the input tensor. + src (Tensor): The tensor to embed into :attr:`input` + dim (int): the dimension to insert the slice into + start (Optional[int]): the start index of where to insert the slice + end (Optional[int]): the end index of where to insert the slice + step (int): the how many elements to skip in + + Example:: + + >>> a = torch.zeros(8, 8) + >>> b = torch.ones(2, 8) + >>> a.slice_scatter(b, start=6) + tensor([[0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [1., 1., 1., 1., 1., 1., 1., 1.], + [1., 1., 1., 1., 1., 1., 1., 1.]]) + + >>> b = torch.ones(8, 2) + >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2) + tensor([[0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.]]) + """ + ... +def slogdet(input: Tensor, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.slogdet: + r""" + slogdet(input) -> (Tensor, Tensor) + + Alias for :func:`torch.linalg.slogdet` + """ + ... +def smm(input: Tensor, mat2: Tensor) -> Tensor: + r""" + smm(input, mat) -> Tensor + + Performs a matrix multiplication of the sparse matrix :attr:`input` + with the dense matrix :attr:`mat`. + + Args: + input (Tensor): a sparse matrix to be matrix multiplied + mat (Tensor): a dense matrix to be matrix multiplied + """ + ... +@overload +def softmax(input: Tensor, dim: _int, dtype: Optional[_dtype] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + softmax(input, dim, *, dtype=None) -> Tensor + + Alias for :func:`torch.nn.functional.softmax`. + """ + ... +@overload +def softmax(input: Tensor, dim: Union[str, ellipsis, None], *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + softmax(input, dim, *, dtype=None) -> Tensor + + Alias for :func:`torch.nn.functional.softmax`. + """ + ... +@overload +def sort(input: Tensor, *, stable: Optional[_bool], dim: _int = -1, descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: + r""" + sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + + Sorts the elements of the :attr:`input` tensor along a given dimension + in ascending order by value. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`descending` is ``True`` then the elements are sorted in descending + order by value. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. + + A namedtuple of (values, indices) is returned, where the `values` are the + sorted values and `indices` are the indices of the elements in the original + `input` tensor. 
+ + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. + + Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + + Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) + """ + ... +@overload +def sort(input: Tensor, dim: _int = -1, descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: + r""" + sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + + Sorts the elements of the :attr:`input` tensor along a given dimension + in ascending order by value. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`descending` is ``True`` then the elements are sorted in descending + order by value. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. + + A namedtuple of (values, indices) is returned, where the `values` are the + sorted values and `indices` are the indices of the elements in the original + `input` tensor. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. 
+ + Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + + Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) + """ + ... +@overload +def sort(input: Tensor, *, stable: Optional[_bool], dim: Union[str, ellipsis, None], descending: _bool = False, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: + r""" + sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + + Sorts the elements of the :attr:`input` tensor along a given dimension + in ascending order by value. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`descending` is ``True`` then the elements are sorted in descending + order by value. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. + + A namedtuple of (values, indices) is returned, where the `values` are the + sorted values and `indices` are the indices of the elements in the original + `input` tensor. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. + + Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + + Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) + """ + ... 
+@overload +def sort(input: Tensor, dim: Union[str, ellipsis, None], descending: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.sort: + r""" + sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + + Sorts the elements of the :attr:`input` tensor along a given dimension + in ascending order by value. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`descending` is ``True`` then the elements are sorted in descending + order by value. + + If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving + the order of equivalent elements. + + A namedtuple of (values, indices) is returned, where the `values` are the + sorted values and `indices` are the indices of the elements in the original + `input` tensor. + + Args: + input (Tensor): the input tensor. + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. + + Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + + Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) + """ + ... +def sparse_bsc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse + Column)) ` with specified 2-dimensional blocks at the + given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix + multiplication operations in BSC format are typically faster than that + for sparse tensors in COO format. Make you have a look at :ref:`the + note on the data type of the indices `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. 
+ + Args: + ccol_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, ncolblocks + 1)``. The last element of each + batch is the number of non-zeros. This tensor encodes the + index in values and row_indices depending on where the given + column starts. Each successive number in the tensor subtracted + by the number before it denotes the number of elements in a + given column. + row_indices (array_like): Row block co-ordinates of each block in + values. (B+1)-dimensional tensor with the same length + as values. + values (array_list): Initial blocks for the tensor. Can be a list, + tuple, NumPy ``ndarray``, and other types that + represents a (1 + 2 + K)-dimensional tensor where ``K`` is the + number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` If not provided, the size will be + inferred as the minimum size big enough to hold all non-zero + blocks. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. + + Example:: + >>> ccol_indices = [0, 1, 2] + >>> row_indices = [0, 1] + >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), + ... torch.tensor(row_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(ccol_indices=tensor([0, 1, 2]), + row_indices=tensor([0, 1]), + values=tensor([[[1., 2.], + [3., 4.]], + [[5., 6.], + [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, + layout=torch.sparse_bsc) + """ + ... +def sparse_bsr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_bsr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)) + ` with specified 2-dimensional blocks at the given + :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix + multiplication operations in BSR format are typically faster than that + for sparse tensors in COO format. Make you have a look at :ref:`the + note on the data type of the indices `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. 
+ + Args: + crow_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, nrowblocks + 1)``. The last element of each + batch is the number of non-zeros. This tensor encodes the + block index in values and col_indices depending on where the + given row block starts. Each successive number in the tensor + subtracted by the number before it denotes the number of + blocks in a given row. + col_indices (array_like): Column block co-ordinates of each block + in values. (B+1)-dimensional tensor with the same length as + values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1 + 2 + K)-dimensional tensor where ``K`` is the + number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` where ``blocksize == + values.shape[1:3]``. If not provided, the size will be + inferred as the minimum size big enough to hold all non-zero + blocks. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. + + Example:: + >>> crow_indices = [0, 1, 2] + >>> col_indices = [0, 1] + >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64), + ... torch.tensor(col_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(crow_indices=tensor([0, 1, 2]), + col_indices=tensor([0, 1]), + values=tensor([[[1., 2.], + [3., 4.]], + [[5., 6.], + [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, + layout=torch.sparse_bsr) + """ + ... +def sparse_compressed_tensor(compressed_indices: Union[Tensor, List], plain_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, *, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR, + CSC, BSR, or BSC - ` with specified values at + the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse + matrix multiplication operations in Compressed Sparse format are + typically faster than that for sparse tensors in COO format. Make you + have a look at :ref:`the note on the data type of the indices + `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. 
If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. + + Args: + compressed_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, compressed_dim_size + 1)``. The last element of + each batch is the number of non-zero elements or blocks. This + tensor encodes the index in ``values`` and ``plain_indices`` + depending on where the given compressed dimension (row or + column) starts. Each successive number in the tensor + subtracted by the number before it denotes the number of + elements or blocks in a given compressed dimension. + plain_indices (array_like): Plain dimension (column or row) + co-ordinates of each element or block in values. (B+1)-dimensional + tensor with the same length as values. + + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types. that + represents a (1+K)-dimensional (for CSR and CSC layouts) or + (1+2+K)-dimensional tensor (for BSR and BSC layouts) where + ``K`` is the number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` where ``blocksize[0] == + blocksize[1] == 1`` for CSR and CSC formats. If not provided, + the size will be inferred as the minimum size big enough to + hold all non-zero elements or blocks. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + layout (:class:`torch.layout`, required): the desired layout of + returned tensor: :attr:`torch.sparse_csr`, + :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or + :attr:`torch.sparse_bsc`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. + + Example:: + >>> compressed_indices = [0, 2, 4] + >>> plain_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64), + ... torch.tensor(plain_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr) + tensor(crow_indices=tensor([0, 2, 4]), + col_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csr) + """ + ... +def sparse_coo_tensor(indices: Tensor, values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None, is_coalesced: Optional[_bool] = None) -> Tensor: + r""" + sparse_coo_tensor(indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor + + Constructs a :ref:`sparse tensor in COO(rdinate) format + ` with specified values at the given + :attr:`indices`. + + .. 
note:: + + This function returns an :ref:`uncoalesced tensor + ` when :attr:`is_coalesced` is + unspecified or ``None``. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. + + Args: + indices (array_like): Initial data for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor` + internally. The indices are the coordinates of the non-zero values in the matrix, and thus + should be two-dimensional where the first dimension is the number of tensor dimensions and + the second dimension is the number of non-zero values. + values (array_like): Initial values for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. + size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not + provided the size will be inferred as the minimum size big enough to hold all non-zero + elements. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if None, infers data type from :attr:`values`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. + is_coalesced (bool, optional): When``True``, the caller is + responsible for providing tensor indices that correspond to a + coalesced tensor. If the :attr:`check_invariants` flag is + False, no error will be raised if the prerequisites are not + met and this will lead to silently incorrect results. To force + coalescion please use :meth:`coalesce` on the resulting + Tensor. + Default: None: except for trivial cases (e.g. nnz < 2) the + resulting Tensor has is_coalesced set to ``False```. + + Example:: + + >>> i = torch.tensor([[0, 1, 1], + ... [2, 0, 2]]) + >>> v = torch.tensor([3, 4, 5], dtype=torch.float32) + >>> torch.sparse_coo_tensor(i, v, [2, 4]) + tensor(indices=tensor([[0, 1, 1], + [2, 0, 2]]), + values=tensor([3., 4., 5.]), + size=(2, 4), nnz=3, layout=torch.sparse_coo) + + >>> torch.sparse_coo_tensor(i, v) # Shape inference + tensor(indices=tensor([[0, 1, 1], + [2, 0, 2]]), + values=tensor([3., 4., 5.]), + size=(2, 3), nnz=3, layout=torch.sparse_coo) + + >>> torch.sparse_coo_tensor(i, v, [2, 4], + ... dtype=torch.float64, + ... device=torch.device('cuda:0')) + tensor(indices=tensor([[0, 1, 1], + [2, 0, 2]]), + values=tensor([3., 4., 5.]), + device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64, + layout=torch.sparse_coo) + + # Create an empty sparse tensor with the following invariants: + # 1. sparse_dim + dense_dim = len(SparseTensor.shape) + # 2. SparseTensor._indices().shape = (sparse_dim, nnz) + # 3. 
SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:]) + # + # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and + # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0)) + >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1]) + tensor(indices=tensor([], size=(1, 0)), + values=tensor([], size=(0,)), + size=(1,), nnz=0, layout=torch.sparse_coo) + + # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and + # sparse_dim = 1 + >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2]) + tensor(indices=tensor([], size=(1, 0)), + values=tensor([], size=(0, 2)), + size=(1, 2), nnz=0, layout=torch.sparse_coo) + + .. _torch.sparse: https://pytorch.org/docs/stable/sparse.html + """ + ... +def sparse_csc_tensor(ccol_indices: Union[Tensor, List], row_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_csc_tensor(ccol_indices, row_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column) + ` with specified values at the given + :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix + multiplication operations in CSC format are typically faster than that + for sparse tensors in COO format. Make you have a look at :ref:`the + note on the data type of the indices `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. + + Args: + ccol_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, ncols + 1)``. The last element of each batch + is the number of non-zeros. This tensor encodes the index in + values and row_indices depending on where the given column + starts. Each successive number in the tensor subtracted by the + number before it denotes the number of elements in a given + column. + row_indices (array_like): Row co-ordinates of each element in + values. (B+1)-dimensional tensor with the same length as + values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1+K)-dimensional tensor where ``K`` is the number + of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If + not provided, the size will be inferred as the minimum size + big enough to hold all non-zero elements. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. 
+ Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. + + Example:: + >>> ccol_indices = [0, 2, 4] + >>> row_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), + ... torch.tensor(row_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(ccol_indices=tensor([0, 2, 4]), + row_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csc) + """ + ... +def sparse_csr_tensor(crow_indices: Union[Tensor, List], col_indices: Union[Tensor, List], values: Union[Tensor, List], size: Optional[_size] = None, *, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, check_invariants: Optional[_bool] = None) -> Tensor: + r""" + sparse_csr_tensor(crow_indices, col_indices, values, size=None, *, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + + Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) ` with specified + values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations + in CSR format are typically faster than that for sparse tensors in COO format. Make you have a look + at :ref:`the note on the data type of the indices `. + + .. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor. + + Args: + crow_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, nrows + 1)``. The last element of each batch + is the number of non-zeros. This tensor encodes the index in + values and col_indices depending on where the given row + starts. Each successive number in the tensor subtracted by the + number before it denotes the number of elements in a given + row. + col_indices (array_like): Column co-ordinates of each element in + values. (B+1)-dimensional tensor with the same length + as values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1+K)-dimensional tensor where ``K`` is the number + of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If + not provided, the size will be inferred as the minimum size + big enough to hold all non-zero elements. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. 
+ + Example:: + >>> crow_indices = [0, 2, 4] + >>> col_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64), + ... torch.tensor(col_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(crow_indices=tensor([0, 2, 4]), + col_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csr) + """ + ... +def split_copy(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: + r""" + Performs the same operation as :func:`torch.split`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ... +def split_with_sizes_copy(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: + r""" + Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def spmm(input: Tensor, mat2: Tensor) -> Tensor: ... +def sqrt(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + sqrt(input, *, out=None) -> Tensor + + Returns a new tensor with the square-root of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \sqrt{\text{input}_{i}} + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-2.0755, 1.0226, 0.0831, 0.4806]) + >>> torch.sqrt(a) + tensor([ nan, 1.0112, 0.2883, 0.6933]) + """ + ... +def sqrt_(input: Tensor) -> Tensor: ... +def square(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + square(input, *, out=None) -> Tensor + + Returns a new tensor with the square of the elements of :attr:`input`. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-2.0755, 1.0226, 0.0831, 0.4806]) + >>> torch.square(a) + tensor([ 4.3077, 1.0457, 0.0069, 0.2310]) + """ + ... +def square_(input: Tensor) -> Tensor: ... +@overload +def squeeze(input: Tensor) -> Tensor: + r""" + squeeze(input, dim=None) -> Tensor + + Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. + + For example, if `input` is of shape: + :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` + will be of shape: :math:`(A \times B \times C \times D)`. + + When :attr:`dim` is given, a squeeze operation is done only in the given + dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, + ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` + will squeeze the tensor to the shape :math:`(A \times B)`. + + .. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + + .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. 
+ + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. + + Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) + """ + ... +@overload +def squeeze(input: Tensor, dim: _int) -> Tensor: + r""" + squeeze(input, dim=None) -> Tensor + + Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. + + For example, if `input` is of shape: + :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` + will be of shape: :math:`(A \times B \times C \times D)`. + + When :attr:`dim` is given, a squeeze operation is done only in the given + dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, + ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` + will squeeze the tensor to the shape :math:`(A \times B)`. + + .. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + + .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. + + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. + + Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) + """ + ... +@overload +def squeeze(input: Tensor, dim: _size) -> Tensor: + r""" + squeeze(input, dim=None) -> Tensor + + Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. + + For example, if `input` is of shape: + :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` + will be of shape: :math:`(A \times B \times C \times D)`. + + When :attr:`dim` is given, a squeeze operation is done only in the given + dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, + ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` + will squeeze the tensor to the shape :math:`(A \times B)`. + + .. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + + .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. + + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. 
+ + Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) + """ + ... +@overload +def squeeze(input: Tensor, dim: Union[str, ellipsis, None]) -> Tensor: + r""" + squeeze(input, dim=None) -> Tensor + + Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. + + For example, if `input` is of shape: + :math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` + will be of shape: :math:`(A \times B \times C \times D)`. + + When :attr:`dim` is given, a squeeze operation is done only in the given + dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, + ``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` + will squeeze the tensor to the shape :math:`(A \times B)`. + + .. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + + .. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. + + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. + + Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) + """ + ... +@overload +def squeeze_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.squeeze`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def squeeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.squeeze`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def squeeze_copy(input: Tensor, dim: _size, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.squeeze`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def sspaddmm(beta: Union[Number, _complex], self: Tensor, alpha: Union[Number, _complex], mat1: Tensor, mat2: Tensor) -> Tensor: + r""" + sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor + :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result. + + Note: This function is equivalent to :func:`torch.addmm`, except + :attr:`input` and :attr:`mat1` are sparse. 
+ + Args: + input (Tensor): a sparse matrix to be added + mat1 (Tensor): a sparse matrix to be matrix multiplied + mat2 (Tensor): a dense matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + """ + ... +@overload +def sspaddmm(input: Tensor, mat1: Tensor, mat2: Tensor, *, beta: Union[Number, _complex] = 1, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor + :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result. + + Note: This function is equivalent to :func:`torch.addmm`, except + :attr:`input` and :attr:`mat1` are sparse. + + Args: + input (Tensor): a sparse matrix to be added + mat1 (Tensor): a sparse matrix to be matrix multiplied + mat2 (Tensor): a dense matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + """ + ... +@overload +def sspaddmm(beta: Union[Number, _complex], self: Tensor, mat1: Tensor, mat2: Tensor) -> Tensor: + r""" + sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + + Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor + :attr:`mat2`, then adds the sparse tensor :attr:`input` to the result. + + Note: This function is equivalent to :func:`torch.addmm`, except + :attr:`input` and :attr:`mat1` are sparse. + + Args: + input (Tensor): a sparse matrix to be added + mat1 (Tensor): a sparse matrix to be matrix multiplied + mat2 (Tensor): a dense matrix to be matrix multiplied + + Keyword args: + beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + out (Tensor, optional): the output tensor. + """ + ... +def stack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], dim: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + stack(tensors, dim=0, *, out=None) -> Tensor + + Concatenates a sequence of tensors along a new dimension. + + All tensors need to be of the same size. + + .. seealso:: + + :func:`torch.cat` concatenates the given sequence along an existing dimension. + + Arguments: + tensors (sequence of Tensors): sequence of tensors to concatenate + dim (int, optional): dimension to insert. Has to be between 0 and the number + of dimensions of concatenated tensors (inclusive). Default: 0 + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.3367, 0.1288, 0.2345], + [ 0.2303, -1.1229, -0.1863]]) + >>> x = torch.stack((x, x)) # same as torch.stack((x, x), dim=0) + >>> x + tensor([[[ 0.3367, 0.1288, 0.2345], + [ 0.2303, -1.1229, -0.1863]], + + [[ 0.3367, 0.1288, 0.2345], + [ 0.2303, -1.1229, -0.1863]]]) + >>> x.size() + torch.Size([2, 2, 3]) + >>> x = torch.stack((x, x), dim=1) + tensor([[[ 0.3367, 0.1288, 0.2345], + [ 0.3367, 0.1288, 0.2345]], + + [[ 0.2303, -1.1229, -0.1863], + [ 0.2303, -1.1229, -0.1863]]]) + >>> x = torch.stack((x, x), dim=2) + tensor([[[ 0.3367, 0.3367], + [ 0.1288, 0.1288], + [ 0.2345, 0.2345]], + + [[ 0.2303, 0.2303], + [-1.1229, -1.1229], + [-0.1863, -0.1863]]]) + >>> x = torch.stack((x, x), dim=-1) + tensor([[[ 0.3367, 0.3367], + [ 0.1288, 0.1288], + [ 0.2345, 0.2345]], + + [[ 0.2303, 0.2303], + [-1.1229, -1.1229], + [-0.1863, -0.1863]]]) + """ + ... +@overload +def std(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. 
math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std(input: Tensor, unbiased: _bool = True) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... 
+@overload +def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the standard deviation over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + dim (int or tuple of ints): the dimension or dimensions to reduce. + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. 
versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. 
math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... 
[[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def std_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the standard deviation and mean over the dimensions specified by + :attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or + ``None`` to reduce over all dimensions. + + The standard deviation (:math:`\sigma`) is calculated as + + .. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. 
+ + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (std, mean) containing the standard deviation and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def sub(input: Union[Tensor, Number, _complex], other: Union[Tensor, Number, _complex], *, alpha: Optional[Union[Number, _complex]] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + sub(input, other, *, alpha=1, out=None) -> Tensor + + Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to subtract from :attr:`input`. + + Keyword args: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2)) + >>> b = torch.tensor((0, 1)) + >>> torch.sub(a, b, alpha=2) + tensor([1, 0]) + """ + ... +@overload +def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + sub(input, other, *, alpha=1, out=None) -> Tensor + + Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`. + + .. math:: + \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to subtract from :attr:`input`. + + Keyword args: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2)) + >>> b = torch.tensor((0, 1)) + >>> torch.sub(a, b, alpha=2) + tensor([1, 0]) + """ + ... +@overload +def sub(self: Tensor, alpha: Union[Number, _complex], other: Tensor, *, out: Tensor) -> Tensor: + r""" + sub(input, other, *, alpha=1, out=None) -> Tensor + + Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`. + + .. 
math:: + \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i + + + Supports :ref:`broadcasting to a common shape `, + :ref:`type promotion `, and integer, float, and complex inputs. + + Args: + input (Tensor): the input tensor. + other (Tensor or Number): the tensor or number to subtract from :attr:`input`. + + Keyword args: + alpha (Number): the multiplier for :attr:`other`. + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor((1, 2)) + >>> b = torch.tensor((0, 1)) + >>> torch.sub(a, b, alpha=2) + tensor([1, 0]) + """ + ... +@overload +def subtract(input: Tensor, other: Tensor, *, alpha: Union[Number, _complex] = 1, out: Optional[Tensor] = None) -> Tensor: + r""" + subtract(input, other, *, alpha=1, out=None) -> Tensor + + Alias for :func:`torch.sub`. + """ + ... +@overload +def subtract(input: Tensor, other: Union[Number, _complex], alpha: Union[Number, _complex] = 1) -> Tensor: + r""" + subtract(input, other, *, alpha=1, out=None) -> Tensor + + Alias for :func:`torch.sub`. + """ + ... +@overload +def sum(input: Tensor, *, dtype: Optional[_dtype] = None) -> Tensor: + r""" + sum(input, *, dtype=None) -> Tensor + + Returns the sum of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.1133, -0.9567, 0.2958]]) + >>> torch.sum(a) + tensor(-0.5475) + + .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the sum of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0569, -0.2475, 0.0737, -0.3429], + [-0.2993, 0.9138, 0.9337, -1.6864], + [ 0.1132, 0.7892, -0.1003, 0.5688], + [ 0.3637, -0.9906, -0.4752, -1.5197]]) + >>> torch.sum(a, 1) + tensor([-0.4598, -0.1381, 1.3708, -2.6217]) + >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6) + >>> torch.sum(b, (2, 1)) + tensor([ 435., 1335., 2235., 3135.]) + """ + ... +@overload +def sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + sum(input, *, dtype=None) -> Tensor + + Returns the sum of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. 
+ + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.1133, -0.9567, 0.2958]]) + >>> torch.sum(a) + tensor(-0.5475) + + .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the sum of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0569, -0.2475, 0.0737, -0.3429], + [-0.2993, 0.9138, 0.9337, -1.6864], + [ 0.1132, 0.7892, -0.1003, 0.5688], + [ 0.3637, -0.9906, -0.4752, -1.5197]]) + >>> torch.sum(a, 1) + tensor([-0.4598, -0.1381, 1.3708, -2.6217]) + >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6) + >>> torch.sum(b, (2, 1)) + tensor([ 435., 1335., 2235., 3135.]) + """ + ... +@overload +def sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool = False, *, dtype: Optional[_dtype] = None, out: Optional[Tensor] = None) -> Tensor: + r""" + sum(input, *, dtype=None) -> Tensor + + Returns the sum of all elements in the :attr:`input` tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.1133, -0.9567, 0.2958]]) + >>> torch.sum(a) + tensor(-0.5475) + + .. function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + + Returns the sum of each row of the :attr:`input` tensor in the given + dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, + reduce over all of them. + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. 
+ If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + + Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0569, -0.2475, 0.0737, -0.3429], + [-0.2993, 0.9138, 0.9337, -1.6864], + [ 0.1132, 0.7892, -0.1003, 0.5688], + [ 0.3637, -0.9906, -0.4752, -1.5197]]) + >>> torch.sum(a, 1) + tensor([-0.4598, -0.1381, 1.3708, -2.6217]) + >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6) + >>> torch.sum(b, (2, 1)) + tensor([ 435., 1335., 2235., 3135.]) + """ + ... +def svd(input: Tensor, some: _bool = True, compute_uv: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.svd: + r""" + svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor) + + Computes the singular value decomposition of either a matrix or batch of + matrices :attr:`input`. The singular value decomposition is represented as a + namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`. + where :math:`V^{\text{H}}` is the transpose of `V` for real inputs, + and the conjugate transpose of `V` for complex inputs. + If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also + batched with the same batch dimensions as :attr:`input`. + + If :attr:`some` is `True` (default), the method returns the reduced singular + value decomposition. In this case, if the last two dimensions of :attr:`input` are + `m` and `n`, then the returned `U` and `V` matrices will contain only + `min(n, m)` orthonormal columns. + + If :attr:`compute_uv` is `False`, the returned `U` and `V` will be + zero-filled matrices of shape `(m, m)` and `(n, n)` + respectively, and the same device as :attr:`input`. The argument :attr:`some` + has no effect when :attr:`compute_uv` is `False`. + + Supports :attr:`input` of float, double, cfloat and cdouble data types. + The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will + always be real-valued, even if :attr:`input` is complex. + + .. warning:: + + :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd` + and will be removed in a future PyTorch release. + + ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with + + .. code:: python + + U, S, Vh = torch.linalg.svd(A, full_matrices=not some) + V = Vh.mH + + ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with + + .. code:: python + + S = torch.linalg.svdvals(A) + + .. note:: Differences with :func:`torch.linalg.svd`: + + * :attr:`some` is the opposite of + :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that + default value for both is `True`, so the default behavior is + effectively the opposite. + * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns + `Vh`, that is, :math:`V^{\text{H}}`. + * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled + tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns + empty tensors. + + .. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices, + then the singular values of each matrix in the batch are returned in descending order. + + .. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`. + + .. 
note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]` + and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors + can be arbitrary bases of the corresponding subspaces. + + .. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd` + (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously, + on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243 + and later, and MAGMA's routine `gesdd` on earlier versions of CUDA. + + .. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will + be represented as a column-major matrix (i.e. Fortran-contiguous). + + .. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not + have zero nor repeated singular values. + + .. warning:: If the distance between any two singular values is close to zero, the gradients with respect to + `U` and `V` will be numerically unstable, as they depends on + :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix + has small singular values, as these gradients also depend on `S^{-1}`. + + .. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique, + as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column. + The same happens when :attr:`input` has repeated singular values, where one may multiply + the columns of the spanning subspace in `U` and `V` by a rotation matrix + and `the resulting vectors will span the same subspace`_. + Different platforms, like NumPy, or inputs on different device types, + may produce different `U` and `V` tensors. + + Args: + input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more + batch dimensions consisting of `(m, n)` matrices. + some (bool, optional): controls whether to compute the reduced or full decomposition, and + consequently, the shape of returned `U` and `V`. Default: `True`. + compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`. + + Keyword args: + out (tuple, optional): the output tuple of tensors + + Example:: + + >>> a = torch.randn(5, 3) + >>> a + tensor([[ 0.2364, -0.7752, 0.6372], + [ 1.7201, 0.7394, -0.0504], + [-0.3371, -1.0584, 0.5296], + [ 0.3550, -0.4022, 1.5569], + [ 0.2445, -0.0158, 1.1414]]) + >>> u, s, v = torch.svd(a) + >>> u + tensor([[ 0.4027, 0.0287, 0.5434], + [-0.1946, 0.8833, 0.3679], + [ 0.4296, -0.2890, 0.5261], + [ 0.6604, 0.2717, -0.2618], + [ 0.4234, 0.2481, -0.4733]]) + >>> s + tensor([2.3289, 2.0315, 0.7806]) + >>> v + tensor([[-0.0199, 0.8766, 0.4809], + [-0.5080, 0.4054, -0.7600], + [ 0.8611, 0.2594, -0.4373]]) + >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t())) + tensor(8.6531e-07) + >>> a_big = torch.randn(7, 5, 3) + >>> u, s, v = torch.svd(a_big) + >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT)) + tensor(2.6503e-06) + + .. _the resulting vectors will span the same subspace: + (https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD) + """ + ... +def swapaxes(input: Tensor, axis0: _int, axis1: _int) -> Tensor: + r""" + swapaxes(input, axis0, axis1) -> Tensor + + Alias for :func:`torch.transpose`. + + This function is equivalent to NumPy's swapaxes function. 
+ + Examples:: + + >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.swapaxes(x, 0, 1) + tensor([[[0, 1], + [4, 5]], + + [[2, 3], + [6, 7]]]) + >>> torch.swapaxes(x, 0, 2) + tensor([[[0, 4], + [2, 6]], + + [[1, 5], + [3, 7]]]) + """ + ... +def swapdims(input: Tensor, dim0: _int, dim1: _int) -> Tensor: + r""" + swapdims(input, dim0, dim1) -> Tensor + + Alias for :func:`torch.transpose`. + + This function is equivalent to NumPy's swapaxes function. + + Examples:: + + >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.swapdims(x, 0, 1) + tensor([[[0, 1], + [4, 5]], + + [[2, 3], + [6, 7]]]) + >>> torch.swapdims(x, 0, 2) + tensor([[[0, 4], + [2, 6]], + + [[1, 5], + [3, 7]]]) + """ + ... +def sym_constrain_range(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ... +def sym_constrain_range_for_size(size: Union[Number, _complex], *, min: Optional[_int] = None, max: Optional[_int] = None) -> None: ... +def t(input: Tensor) -> Tensor: + r""" + t(input) -> Tensor + + Expects :attr:`input` to be <= 2-D tensor and transposes dimensions 0 + and 1. + + 0-D and 1-D tensors are returned as is. When input is a 2-D tensor this + is equivalent to ``transpose(input, 0, 1)``. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x = torch.randn(()) + >>> x + tensor(0.1995) + >>> torch.t(x) + tensor(0.1995) + >>> x = torch.randn(3) + >>> x + tensor([ 2.4320, -0.4608, 0.7702]) + >>> torch.t(x) + tensor([ 2.4320, -0.4608, 0.7702]) + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.4875, 0.9158, -0.5872], + [ 0.3938, -0.6929, 0.6932]]) + >>> torch.t(x) + tensor([[ 0.4875, 0.3938], + [ 0.9158, -0.6929], + [-0.5872, 0.6932]]) + + See also :func:`torch.transpose`. + """ + ... +def t_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.t`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def take(input: Tensor, index: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + take(input, index) -> Tensor + + Returns a new tensor with the elements of :attr:`input` at the given indices. + The input tensor is treated as if it were viewed as a 1-D tensor. The result + takes the same shape as the indices. + + Args: + input (Tensor): the input tensor. + index (LongTensor): the indices into tensor + + Example:: + + >>> src = torch.tensor([[4, 3, 5], + ... [6, 7, 8]]) + >>> torch.take(src, torch.tensor([0, 2, 5])) + tensor([ 4, 5, 8]) + """ + ... +def take_along_dim(input: Tensor, indices: Tensor, dim: Optional[_int] = None, *, out: Optional[Tensor] = None) -> Tensor: + r""" + take_along_dim(input, indices, dim=None, *, out=None) -> Tensor + + Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`. + + If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d. + + Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`, + are designed to work with this function. See the examples below. + + .. note:: + This function is similar to NumPy's `take_along_axis`. + See also :func:`torch.gather`. + + Args: + input (Tensor): the input tensor. + indices (tensor): the indices into :attr:`input`. Must have long dtype. + dim (int, optional): dimension to select along. 
+ + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]]) + >>> max_idx = torch.argmax(t) + >>> torch.take_along_dim(t, max_idx) + tensor([60]) + >>> sorted_idx = torch.argsort(t, dim=1) + >>> torch.take_along_dim(t, sorted_idx, dim=1) + tensor([[10, 20, 30], + [40, 50, 60]]) + """ + ... +def tan(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + tan(input, *, out=None) -> Tensor + + Returns a new tensor with the tangent of the elements of :attr:`input`. + + .. math:: + \text{out}_{i} = \tan(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.2027, -1.7687, 0.4412, -1.3856]) + >>> torch.tan(a) + tensor([-2.5930, 4.9859, 0.4722, -5.3366]) + """ + ... +def tan_(input: Tensor) -> Tensor: ... +def tanh(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + tanh(input, *, out=None) -> Tensor + + Returns a new tensor with the hyperbolic tangent of the elements + of :attr:`input`. + + .. math:: + \text{out}_{i} = \tanh(\text{input}_{i}) + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.8986, -0.7279, 1.1745, 0.2611]) + >>> torch.tanh(a) + tensor([ 0.7156, -0.6218, 0.8257, 0.2553]) + """ + ... +def tanh_(input: Tensor) -> Tensor: ... +def tensor(data: Any, dtype: Optional[_dtype] = None, device: Optional[DeviceLikeType] = None, requires_grad: _bool = False, pin_memory: _bool = False) -> Tensor: + r""" + tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + + Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`. + + .. warning:: + + When working with tensors prefer using :func:`torch.Tensor.clone`, + :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for + readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to + ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)`` + is equivalent to ``t.clone().detach().requires_grad_(True)``. + + .. seealso:: + + :func:`torch.as_tensor` preserves autograd history and avoids copies where possible. + :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array. + + Args: + data (array_like): Initial data for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, infers data type from :attr:`data`. + device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor + then the device of data is used. If None and data is not a tensor then + the result tensor is constructed on the current device. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + + + Example:: + + >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]) + tensor([[ 0.1000, 1.2000], + [ 2.2000, 3.1000], + [ 4.9000, 5.2000]]) + + >>> torch.tensor([0, 1]) # Type inference on data + tensor([ 0, 1]) + + >>> torch.tensor([[0.11111, 0.222222, 0.3333333]], + ... 
dtype=torch.float64, + ... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device + tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0') + + >>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor + tensor(3.1416) + + >>> torch.tensor([]) # Create an empty tensor (of size (0,)) + tensor([]) + """ + ... +@overload +def tensor_split(input: Tensor, tensor_indices_or_sections: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + tensor_split(input, indices_or_sections, dim=0) -> List of Tensors + + Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`, + along dimension :attr:`dim` according to the indices or number of sections specified + by :attr:`indices_or_sections`. This function is based on NumPy's + :func:`numpy.array_split`. + + Args: + input (Tensor): the tensor to split + indices_or_sections (Tensor, int or list or tuple of ints): + If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor + with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`. + If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each + section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input` + is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)` + sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will + have size :code:`int(input.size(dim) / n)`. + + If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long + tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices + in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0` + would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`. + + If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional + long tensor on the CPU. + + dim (int, optional): dimension along which to split the tensor. Default: ``0`` + + Example:: + + >>> x = torch.arange(8) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7])) + + >>> x = torch.arange(7) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) + >>> torch.tensor_split(x, (1, 6)) + (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6])) + + >>> x = torch.arange(14).reshape(2, 7) + >>> x + tensor([[ 0, 1, 2, 3, 4, 5, 6], + [ 7, 8, 9, 10, 11, 12, 13]]) + >>> torch.tensor_split(x, 3, dim=1) + (tensor([[0, 1, 2], + [7, 8, 9]]), + tensor([[ 3, 4], + [10, 11]]), + tensor([[ 5, 6], + [12, 13]])) + >>> torch.tensor_split(x, (1, 6), dim=1) + (tensor([[0], + [7]]), + tensor([[ 1, 2, 3, 4, 5], + [ 8, 9, 10, 11, 12]]), + tensor([[ 6], + [13]])) + """ + ... +@overload +def tensor_split(input: Tensor, sections: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + tensor_split(input, indices_or_sections, dim=0) -> List of Tensors + + Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`, + along dimension :attr:`dim` according to the indices or number of sections specified + by :attr:`indices_or_sections`. This function is based on NumPy's + :func:`numpy.array_split`. + + Args: + input (Tensor): the tensor to split + indices_or_sections (Tensor, int or list or tuple of ints): + If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor + with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`. 
+ If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each + section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input` + is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)` + sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will + have size :code:`int(input.size(dim) / n)`. + + If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long + tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices + in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0` + would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`. + + If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional + long tensor on the CPU. + + dim (int, optional): dimension along which to split the tensor. Default: ``0`` + + Example:: + + >>> x = torch.arange(8) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7])) + + >>> x = torch.arange(7) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) + >>> torch.tensor_split(x, (1, 6)) + (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6])) + + >>> x = torch.arange(14).reshape(2, 7) + >>> x + tensor([[ 0, 1, 2, 3, 4, 5, 6], + [ 7, 8, 9, 10, 11, 12, 13]]) + >>> torch.tensor_split(x, 3, dim=1) + (tensor([[0, 1, 2], + [7, 8, 9]]), + tensor([[ 3, 4], + [10, 11]]), + tensor([[ 5, 6], + [12, 13]])) + >>> torch.tensor_split(x, (1, 6), dim=1) + (tensor([[0], + [7]]), + tensor([[ 1, 2, 3, 4, 5], + [ 8, 9, 10, 11, 12]]), + tensor([[ 6], + [13]])) + """ + ... +@overload +def tensor_split(input: Tensor, indices: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + tensor_split(input, indices_or_sections, dim=0) -> List of Tensors + + Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`, + along dimension :attr:`dim` according to the indices or number of sections specified + by :attr:`indices_or_sections`. This function is based on NumPy's + :func:`numpy.array_split`. + + Args: + input (Tensor): the tensor to split + indices_or_sections (Tensor, int or list or tuple of ints): + If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor + with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`. + If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each + section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input` + is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)` + sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will + have size :code:`int(input.size(dim) / n)`. + + If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long + tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices + in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0` + would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`. + + If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional + long tensor on the CPU. + + dim (int, optional): dimension along which to split the tensor. 
Default: ``0`` + + Example:: + + >>> x = torch.arange(8) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7])) + + >>> x = torch.arange(7) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) + >>> torch.tensor_split(x, (1, 6)) + (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6])) + + >>> x = torch.arange(14).reshape(2, 7) + >>> x + tensor([[ 0, 1, 2, 3, 4, 5, 6], + [ 7, 8, 9, 10, 11, 12, 13]]) + >>> torch.tensor_split(x, 3, dim=1) + (tensor([[0, 1, 2], + [7, 8, 9]]), + tensor([[ 3, 4], + [10, 11]]), + tensor([[ 5, 6], + [12, 13]])) + >>> torch.tensor_split(x, (1, 6), dim=1) + (tensor([[0], + [7]]), + tensor([[ 1, 2, 3, 4, 5], + [ 8, 9, 10, 11, 12]]), + tensor([[ 6], + [13]])) + """ + ... +def threshold(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: ... +def threshold_(input: Tensor, threshold: Union[Number, _complex], value: Union[Number, _complex]) -> Tensor: ... +def tile(input: Tensor, dims: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + tile(input, dims) -> Tensor + + Constructs a tensor by repeating the elements of :attr:`input`. + The :attr:`dims` argument specifies the number of repetitions + in each dimension. + + If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then + ones are prepended to :attr:`dims` until all dimensions are specified. + For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims` + is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2). + + Analogously, if :attr:`input` has fewer dimensions than :attr:`dims` + specifies, then :attr:`input` is treated as if it were unsqueezed at + dimension zero until it has as many dimensions as :attr:`dims` specifies. + For example, if :attr:`input` has shape (4, 2) and :attr:`dims` + is (3, 3, 2, 2), then :attr:`input` is treated as if it had the + shape (1, 1, 4, 2). + + .. note:: + + This function is similar to NumPy's tile function. + + Args: + input (Tensor): the tensor whose elements to repeat. + dims (tuple): the number of repetitions per dimension. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.tile((2,)) + tensor([1, 2, 3, 1, 2, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.tile(y, (2, 2)) + tensor([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) + """ + ... +def topk(input: Tensor, k: Union[_int, SymInt], dim: _int = -1, largest: _bool = True, sorted: _bool = True, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.topk: + r""" + topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor) + + Returns the :attr:`k` largest elements of the given :attr:`input` tensor along + a given dimension. + + If :attr:`dim` is not given, the last dimension of the `input` is chosen. + + If :attr:`largest` is ``False`` then the `k` smallest elements are returned. + + A namedtuple of `(values, indices)` is returned with the `values` and + `indices` of the largest `k` elements of each row of the `input` tensor in the + given dimension `dim`. + + The boolean option :attr:`sorted` if ``True``, will make sure that the returned + `k` elements are themselves sorted + + Args: + input (Tensor): the input tensor. 
+ k (int): the k in "top-k" + dim (int, optional): the dimension to sort along + largest (bool, optional): controls whether to return largest or + smallest elements + sorted (bool, optional): controls whether to return the elements + in sorted order + + Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be + optionally given to be used as output buffers + + Example:: + + >>> x = torch.arange(1., 6.) + >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.topk(x, 3) + torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2])) + """ + ... +def trace(input: Tensor) -> Tensor: + r""" + trace(input) -> Tensor + + Returns the sum of the elements of the diagonal of the input 2-D matrix. + + Example:: + + >>> x = torch.arange(1., 10.).view(3, 3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.], + [ 7., 8., 9.]]) + >>> torch.trace(x) + tensor(15.) + """ + ... +@overload +def transpose(input: Tensor, dim0: _int, dim1: _int) -> Tensor: + r""" + transpose(input, dim0, dim1) -> Tensor + + Returns a tensor that is a transposed version of :attr:`input`. + The given dimensions :attr:`dim0` and :attr:`dim1` are swapped. + + If :attr:`input` is a strided tensor then the resulting :attr:`out` + tensor shares its underlying storage with the :attr:`input` tensor, so + changing the content of one would change the content of the other. + + If :attr:`input` is a :ref:`sparse tensor ` then the + resulting :attr:`out` tensor *does not* share the underlying storage + with the :attr:`input` tensor. + + If :attr:`input` is a :ref:`sparse tensor ` with compressed + layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments + :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must + both be sparse dimensions. The batch dimensions of a sparse tensor are the + dimensions preceding the sparse dimensions. + + .. note:: + Transpositions which interchange the sparse dimensions of a `SparseCSR` + or `SparseCSC` layout tensor will result in the layout changing between + the two options. Transposition of the sparse dimensions of a ` SparseBSR` + or `SparseBSC` layout tensor will likewise generate a result with the + opposite layout. + + + Args: + input (Tensor): the input tensor. + dim0 (int): the first dimension to be transposed + dim1 (int): the second dimension to be transposed + + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 1.0028, -0.9893, 0.5809], + [-0.1669, 0.7299, 0.4942]]) + >>> torch.transpose(x, 0, 1) + tensor([[ 1.0028, -0.1669], + [-0.9893, 0.7299], + [ 0.5809, 0.4942]]) + + See also :func:`torch.t`. + """ + ... +@overload +def transpose(input: Tensor, dim0: Union[str, ellipsis, None], dim1: Union[str, ellipsis, None]) -> Tensor: + r""" + transpose(input, dim0, dim1) -> Tensor + + Returns a tensor that is a transposed version of :attr:`input`. + The given dimensions :attr:`dim0` and :attr:`dim1` are swapped. + + If :attr:`input` is a strided tensor then the resulting :attr:`out` + tensor shares its underlying storage with the :attr:`input` tensor, so + changing the content of one would change the content of the other. + + If :attr:`input` is a :ref:`sparse tensor ` then the + resulting :attr:`out` tensor *does not* share the underlying storage + with the :attr:`input` tensor. + + If :attr:`input` is a :ref:`sparse tensor ` with compressed + layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments + :attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must + both be sparse dimensions. 
The batch dimensions of a sparse tensor are the + dimensions preceding the sparse dimensions. + + .. note:: + Transpositions which interchange the sparse dimensions of a `SparseCSR` + or `SparseCSC` layout tensor will result in the layout changing between + the two options. Transposition of the sparse dimensions of a ` SparseBSR` + or `SparseBSC` layout tensor will likewise generate a result with the + opposite layout. + + + Args: + input (Tensor): the input tensor. + dim0 (int): the first dimension to be transposed + dim1 (int): the second dimension to be transposed + + Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 1.0028, -0.9893, 0.5809], + [-0.1669, 0.7299, 0.4942]]) + >>> torch.transpose(x, 0, 1) + tensor([[ 1.0028, -0.1669], + [-0.9893, 0.7299], + [ 0.5809, 0.4942]]) + + See also :func:`torch.t`. + """ + ... +def transpose_copy(input: Tensor, dim0: _int, dim1: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.transpose`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def trapezoid(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: + r""" + trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + + Computes the `trapezoidal rule `_ along + :attr:`dim`. By default the spacing between elements is assumed to be 1, but + :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be + used to specify arbitrary spacing along :attr:`dim`. + + + Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`, + the default computation is + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1}) + \end{aligned} + + When :attr:`dx` is specified the computation becomes + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1}) + \end{aligned} + + effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified, + assuming :attr:`x` is also a one-dimensional tensor with + elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1}) + \end{aligned} + + When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed. + The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x` + and :attr:`y`, the function computes the difference between consecutive elements along + dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have + the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1. + After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule. + See the examples below for details. + + .. note:: + The trapezoidal rule is a technique for approximating the definite integral of a function + by averaging its left and right Riemann sums. The approximation becomes more accurate as + the resolution of the partition increases. + + Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + + Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. 
+ The last (inner-most) dimension by default. + + Examples:: + + >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1 + >>> y = torch.tensor([1, 5, 10]) + >>> torch.trapezoid(y) + tensor(10.5) + + >>> # Computes the same trapezoidal rule directly to verify + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.trapezoid(y, dx=2) + 21.0 + + >>> # Computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.trapezoid(y, x) + 28.5 + + >>> # Computes the same trapezoidal rule directly to verify + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.trapezoid(y) + tensor([ 2., 8., 14.]) + + >>> # Computes the trapezoidal rule for each column of the matrix + >>> torch.trapezoid(y, dim=0) + tensor([ 6., 8., 10.]) + + >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.trapezoid(y, x) + array([5., 5., 5.]) + + >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.trapezoid(y, x) + array([2., 4., 6.]) + """ + ... +@overload +def trapezoid(y: Tensor, *, dx: Union[Number, _complex] = 1, dim: _int = -1) -> Tensor: + r""" + trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + + Computes the `trapezoidal rule `_ along + :attr:`dim`. By default the spacing between elements is assumed to be 1, but + :attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be + used to specify arbitrary spacing along :attr:`dim`. + + + Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`, + the default computation is + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1}) + \end{aligned} + + When :attr:`dx` is specified the computation becomes + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1}) + \end{aligned} + + effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified, + assuming :attr:`x` is also a one-dimensional tensor with + elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes + + .. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1}) + \end{aligned} + + When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed. + The broadcasting behavior of this function is as follows when their sizes are different. For both :attr:`x` + and :attr:`y`, the function computes the difference between consecutive elements along + dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have + the same shape as the original tensors except their lengths along the dimension :attr:`dim` is reduced by 1. + After that, those two tensors are broadcast together to compute final output as part of the trapezoidal rule. + See the examples below for details. + + .. note:: + The trapezoidal rule is a technique for approximating the definite integral of a function + by averaging its left and right Riemann sums. 
The approximation becomes more accurate as + the resolution of the partition increases. + + Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + + Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. + The last (inner-most) dimension by default. + + Examples:: + + >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1 + >>> y = torch.tensor([1, 5, 10]) + >>> torch.trapezoid(y) + tensor(10.5) + + >>> # Computes the same trapezoidal rule directly to verify + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.trapezoid(y, dx=2) + 21.0 + + >>> # Computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.trapezoid(y, x) + 28.5 + + >>> # Computes the same trapezoidal rule directly to verify + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.trapezoid(y) + tensor([ 2., 8., 14.]) + + >>> # Computes the trapezoidal rule for each column of the matrix + >>> torch.trapezoid(y, dim=0) + tensor([ 6., 8., 10.]) + + >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.trapezoid(y, x) + array([5., 5., 5.]) + + >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.trapezoid(y, x) + array([2., 4., 6.]) + """ + ... +@overload +def trapz(y: Tensor, *, dx: _float = 1, dim: _int = -1) -> Tensor: + r""" + trapz(y, x, *, dim=-1) -> Tensor + + Alias for :func:`torch.trapezoid`. + """ + ... +@overload +def trapz(y: Tensor, x: Tensor, *, dim: _int = -1) -> Tensor: + r""" + trapz(y, x, *, dim=-1) -> Tensor + + Alias for :func:`torch.trapezoid`. + """ + ... +def triangular_solve(input: Tensor, A: Tensor, upper: _bool = True, transpose: _bool = False, unitriangular: _bool = False, *, out: Union[Tensor, Tuple[Tensor, ...], List[Tensor], None] = None) -> torch.return_types.triangular_solve: + r""" + triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor) + + Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A` + and multiple right-hand sides :math:`b`. + + In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular + (or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal. + + `torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are + batches of 2D matrices. If the inputs are batches, then returns + batched outputs `X` + + If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and + :attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned, + the result may contain `NaN` s. + + Supports input of float, double, cfloat and cdouble data types. 
+ + .. warning:: + + :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular` + and will be removed in a future PyTorch release. + :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a + copy of one of the inputs. + + ``X = torch.triangular_solve(B, A).solution`` should be replaced with + + .. code:: python + + X = torch.linalg.solve_triangular(A, B) + + Args: + b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where + :math:`*` is zero of more batch dimensions + A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)` + where :math:`*` is zero or more batch dimensions + upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``. + transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``, + and `op(A) = A` if it is ``False``. Default: ``False``. + unitriangular (bool, optional): whether :math:`A` is unit triangular. + If True, the diagonal elements of :math:`A` are assumed to be + 1 and not referenced from :math:`A`. Default: ``False``. + + Keyword args: + out ((Tensor, Tensor), optional): tuple of two tensors to write + the output to. Ignored if `None`. Default: `None`. + + Returns: + A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient` + is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b` + (or whatever variant of the system of equations, depending on the keyword arguments.) + + Examples:: + + >>> A = torch.randn(2, 2).triu() + >>> A + tensor([[ 1.1527, -1.0753], + [ 0.0000, 0.7986]]) + >>> b = torch.randn(2, 3) + >>> b + tensor([[-0.0210, 2.3513, -1.5492], + [ 1.5429, 0.7403, -1.0243]]) + >>> torch.triangular_solve(b, A) + torch.return_types.triangular_solve( + solution=tensor([[ 1.7841, 2.9046, -2.5405], + [ 1.9320, 0.9270, -1.2826]]), + cloned_coefficient=tensor([[ 1.1527, -1.0753], + [ 0.0000, 0.7986]])) + """ + ... +def tril(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + tril(input, diagonal=0, *, out=None) -> Tensor + + Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices + :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. + + The lower triangular part of the matrix is defined as the elements on and + below the diagonal. + + The argument :attr:`diagonal` controls which diagonal to consider. If + :attr:`diagonal` = 0, all elements on and below the main diagonal are + retained. A positive value includes just as many diagonals above the main + diagonal, and similarly a negative value excludes just as many diagonals below + the main diagonal. The main diagonal are the set of indices + :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where + :math:`d_{1}, d_{2}` are the dimensions of the matrix. + + Args: + input (Tensor): the input tensor. + diagonal (int, optional): the diagonal to consider + + Keyword args: + out (Tensor, optional): the output tensor. 
+ + Example:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0813, -0.8619, 0.7105], + [ 0.0935, 0.1380, 2.2112], + [-0.3409, -0.9828, 0.0289]]) + >>> torch.tril(a) + tensor([[-1.0813, 0.0000, 0.0000], + [ 0.0935, 0.1380, 0.0000], + [-0.3409, -0.9828, 0.0289]]) + + >>> b = torch.randn(4, 6) + >>> b + tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461], + [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145], + [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864], + [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]]) + >>> torch.tril(b, diagonal=1) + tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000], + [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000], + [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]]) + >>> torch.tril(b, diagonal=-1) + tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000], + [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]]) + """ + ... +def tril_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor + + Returns the indices of the lower triangular part of a :attr:`row`-by- + :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row + coordinates of all indices and the second row contains column coordinates. + Indices are ordered based on rows and then columns. + + The lower triangular part of the matrix is defined as the elements on and + below the diagonal. + + The argument :attr:`offset` controls which diagonal to consider. If + :attr:`offset` = 0, all elements on and below the main diagonal are + retained. A positive value includes just as many diagonals above the main + diagonal, and similarly a negative value excludes just as many diagonals below + the main diagonal. The main diagonal are the set of indices + :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` + where :math:`d_{1}, d_{2}` are the dimensions of the matrix. + + .. note:: + When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to + prevent overflow during calculation. + + Args: + row (``int``): number of rows in the 2-D matrix. + col (``int``): number of columns in the 2-D matrix. + offset (``int``): diagonal offset from the main diagonal. + Default: if not provided, 0. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, ``torch.long``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. + + Example:: + + >>> a = torch.tril_indices(3, 3) + >>> a + tensor([[0, 1, 1, 2, 2, 2], + [0, 0, 1, 0, 1, 2]]) + + >>> a = torch.tril_indices(4, 3, -1) + >>> a + tensor([[1, 2, 2, 3, 3, 3], + [0, 0, 1, 0, 1, 2]]) + + >>> a = torch.tril_indices(4, 3, 1) + >>> a + tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], + [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]]) + """ + ... 
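The stub documents ``torch.tril`` and ``torch.tril_indices`` back to back but never connects them. As an editorial sketch (assuming a recent PyTorch is importable as ``torch``; nothing here is part of the upstream stub), the index pairs returned by ``tril_indices`` address exactly the elements that ``tril`` keeps, so the two can be cross-checked::

    import torch

    a = torch.arange(9.).reshape(3, 3)
    idx = torch.tril_indices(3, 3)              # 2-by-N tensor: row coordinates, then column coordinates
    lower = a[idx[0], idx[1]]                   # the elements on and below the main diagonal
    rebuilt = torch.zeros_like(a)
    rebuilt[idx[0], idx[1]] = lower             # scatter them back into an otherwise-zero matrix
    assert torch.equal(rebuilt, torch.tril(a))  # identical to applying torch.tril directly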
+def triplet_margin_loss(anchor: Tensor, positive: Tensor, negative: Tensor, margin: _float = 1.0, p: _float = 2, eps: _float = 1e-06, swap: _bool = False, reduction: _int = 1) -> Tensor: ... +def triu(input: Tensor, diagonal: _int = 0, *, out: Optional[Tensor] = None) -> Tensor: + r""" + triu(input, diagonal=0, *, out=None) -> Tensor + + Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices + :attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. + + The upper triangular part of the matrix is defined as the elements on and + above the diagonal. + + The argument :attr:`diagonal` controls which diagonal to consider. If + :attr:`diagonal` = 0, all elements on and above the main diagonal are + retained. A positive value excludes just as many diagonals above the main + diagonal, and similarly a negative value includes just as many diagonals below + the main diagonal. The main diagonal are the set of indices + :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where + :math:`d_{1}, d_{2}` are the dimensions of the matrix. + + Args: + input (Tensor): the input tensor. + diagonal (int, optional): the diagonal to consider + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.2072, -1.0680, 0.6602], + [ 0.3480, -0.5211, -0.4573]]) + >>> torch.triu(a) + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.0000, -1.0680, 0.6602], + [ 0.0000, 0.0000, -0.4573]]) + >>> torch.triu(a, diagonal=1) + tensor([[ 0.0000, 0.5207, 2.0049], + [ 0.0000, 0.0000, 0.6602], + [ 0.0000, 0.0000, 0.0000]]) + >>> torch.triu(a, diagonal=-1) + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.2072, -1.0680, 0.6602], + [ 0.0000, -0.5211, -0.4573]]) + + >>> b = torch.randn(4, 6) + >>> b + tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], + [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]]) + >>> torch.triu(b, diagonal=1) + tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]]) + >>> torch.triu(b, diagonal=-1) + tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], + [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]]) + """ + ... +def triu_indices(row: _int, col: _int, offset: _int = 0, *, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor + + Returns the indices of the upper triangular part of a :attr:`row` by + :attr:`col` matrix in a 2-by-N Tensor, where the first row contains row + coordinates of all indices and the second row contains column coordinates. + Indices are ordered based on rows and then columns. + + The upper triangular part of the matrix is defined as the elements on and + above the diagonal. + + The argument :attr:`offset` controls which diagonal to consider. If + :attr:`offset` = 0, all elements on and above the main diagonal are + retained. 
A positive value excludes just as many diagonals above the main + diagonal, and similarly a negative value includes just as many diagonals below + the main diagonal. The main diagonal are the set of indices + :math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` + where :math:`d_{1}, d_{2}` are the dimensions of the matrix. + + .. note:: + When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to + prevent overflow during calculation. + + Args: + row (``int``): number of rows in the 2-D matrix. + col (``int``): number of columns in the 2-D matrix. + offset (``int``): diagonal offset from the main diagonal. + Default: if not provided, 0. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, ``torch.long``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. + + Example:: + + >>> a = torch.triu_indices(3, 3) + >>> a + tensor([[0, 0, 0, 1, 1, 2], + [0, 1, 2, 1, 2, 2]]) + + >>> a = torch.triu_indices(4, 3, -1) + >>> a + tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3], + [0, 1, 2, 0, 1, 2, 1, 2, 2]]) + + >>> a = torch.triu_indices(4, 3, 1) + >>> a + tensor([[0, 0, 1], + [1, 2, 2]]) + """ + ... +def true_divide(input: Union[Tensor, Number], other: Union[Tensor, Number], *, out: Optional[Tensor] = None) -> Tensor: + r""" + true_divide(dividend, divisor, *, out) -> Tensor + + Alias for :func:`torch.div` with ``rounding_mode=None``. + """ + ... +def trunc(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + trunc(input, *, out=None) -> Tensor + + Returns a new tensor with the truncated integer values of + the elements of :attr:`input`. + + For integer inputs, follows the array-api convention of returning a + copy of the input tensor. + + Args: + input (Tensor): the input tensor. + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 3.4742, 0.5466, -0.8008, -0.9079]) + >>> torch.trunc(a) + tensor([ 3., 0., -0., -0.]) + """ + ... +def trunc_(input: Tensor) -> Tensor: ... +@overload +def unbind(input: Tensor, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + unbind(input, dim=0) -> seq + + Removes a tensor dimension. + + Returns a tuple of all slices along a given dimension, already without it. + + Arguments: + input (Tensor): the tensor to unbind + dim (int): dimension to remove + + Example:: + + >>> torch.unbind(torch.tensor([[1, 2, 3], + >>> [4, 5, 6], + >>> [7, 8, 9]])) + (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9])) + """ + ... +@overload +def unbind(input: Tensor, dim: Union[str, ellipsis, None]) -> Tuple[Tensor, ...]: + r""" + unbind(input, dim=0) -> seq + + Removes a tensor dimension. + + Returns a tuple of all slices along a given dimension, already without it. + + Arguments: + input (Tensor): the tensor to unbind + dim (int): dimension to remove + + Example:: + + >>> torch.unbind(torch.tensor([[1, 2, 3], + >>> [4, 5, 6], + >>> [7, 8, 9]])) + (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9])) + """ + ... 
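``torch.unbind`` is documented above only through a single doctest; a short sketch (again assuming ``torch`` is importable, and purely illustrative) relates it to plain indexing: unbinding along a dimension yields one slice per index, with that dimension removed::

    import torch

    m = torch.tensor([[1, 2, 3],
                      [4, 5, 6]])
    rows = torch.unbind(m, dim=0)         # (tensor([1, 2, 3]), tensor([4, 5, 6]))
    cols = torch.unbind(m, dim=1)         # (tensor([1, 4]), tensor([2, 5]), tensor([3, 6]))
    assert torch.equal(rows[0], m[0])     # same as indexing the first row
    assert torch.equal(cols[2], m[:, 2])  # same as slicing the last column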
+def unbind_copy(input: Tensor, dim: _int = 0, *, out: Union[Tuple[Tensor, ...], List[Tensor], None] = None) -> None: + r""" + Performs the same operation as :func:`torch.unbind`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def unflatten(input: Tensor, dim: Union[str, ellipsis, None], sizes: Sequence[Union[_int, SymInt]], names: Sequence[Union[str, ellipsis, None]]) -> Tensor: + r""" + unflatten(input, dim, sizes) -> Tensor + + Expands a dimension of the input tensor over multiple dimensions. + + .. seealso:: + + :func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one. + + Args: + input (Tensor): the input tensor. + dim (int): Dimension to be unflattened, specified as an index into + ``input.shape``. + sizes (Tuple[int]): New shape of the unflattened dimension. + One of its elements can be `-1` in which case the corresponding output + dimension is inferred. Otherwise, the product of ``sizes`` *must* + equal ``input.shape[dim]``. + + Returns: + A View of input with the specified dimension unflattened. + + Examples:: + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape + torch.Size([5, 2, 2, 3, 1, 1, 3]) + """ + ... +@overload +def unflatten(input: Tensor, dim: _int, sizes: Sequence[Union[_int, SymInt]]) -> Tensor: + r""" + unflatten(input, dim, sizes) -> Tensor + + Expands a dimension of the input tensor over multiple dimensions. + + .. seealso:: + + :func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one. + + Args: + input (Tensor): the input tensor. + dim (int): Dimension to be unflattened, specified as an index into + ``input.shape``. + sizes (Tuple[int]): New shape of the unflattened dimension. + One of its elements can be `-1` in which case the corresponding output + dimension is inferred. Otherwise, the product of ``sizes`` *must* + equal ``input.shape[dim]``. + + Returns: + A View of input with the specified dimension unflattened. + + Examples:: + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape + torch.Size([5, 2, 2, 3, 1, 1, 3]) + """ + ... +def unfold_copy(input: Tensor, dimension: _int, size: _int, step: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.unfold`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def unique_dim(input: Tensor, dim: _int, sorted: _bool = True, return_inverse: _bool = False, return_counts: _bool = False) -> Tuple[Tensor, Tensor, Tensor]: ... +def unsafe_chunk(input: Tensor, chunks: _int, dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + unsafe_chunk(input, chunks, dim=0) -> List of Tensors + + Works like :func:`torch.chunk` but without enforcing the autograd restrictions + on inplace modification of the outputs. + + .. warning:: + This function is safe to use as long as only the input, or only the outputs + are modified inplace after calling this function. It is user's + responsibility to ensure that is the case. If both the input and one or more + of the outputs are modified inplace, gradients computed by autograd will be + silently incorrect. 
+ """ + ... +def unsafe_split(input: Tensor, split_size: Union[_int, SymInt], dim: _int = 0) -> Tuple[Tensor, ...]: + r""" + unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors + + Works like :func:`torch.split` but without enforcing the autograd restrictions + on inplace modification of the outputs. + + .. warning:: + This function is safe to use as long as only the input, or only the outputs + are modified inplace after calling this function. It is user's + responsibility to ensure that is the case. If both the input and one or more + of the outputs are modified inplace, gradients computed by autograd will be + silently incorrect. + """ + ... +def unsafe_split_with_sizes(input: Tensor, split_sizes: Sequence[Union[_int, SymInt]], dim: _int = 0) -> Tuple[Tensor, ...]: ... +def unsqueeze(input: Tensor, dim: _int) -> Tensor: + r""" + unsqueeze(input, dim) -> Tensor + + Returns a new tensor with a dimension of size one inserted at the + specified position. + + The returned tensor shares the same underlying data with this tensor. + + A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)`` + can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze` + applied at :attr:`dim` = ``dim + input.dim() + 1``. + + Args: + input (Tensor): the input tensor. + dim (int): the index at which to insert the singleton dimension + + Example:: + + >>> x = torch.tensor([1, 2, 3, 4]) + >>> torch.unsqueeze(x, 0) + tensor([[ 1, 2, 3, 4]]) + >>> torch.unsqueeze(x, 1) + tensor([[ 1], + [ 2], + [ 3], + [ 4]]) + """ + ... +def unsqueeze_copy(input: Tensor, dim: _int, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.unsqueeze`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def values_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.values`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def vander(x: Tensor, N: Optional[_int] = None, increasing: _bool = False) -> Tensor: + r""" + vander(x, N=None, increasing=False) -> Tensor + + Generates a Vandermonde matrix. + + The columns of the output matrix are elementwise powers of the input vector :math:`x^{(N-1)}, x^{(N-2)}, ..., x^0`. + If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{(N-1)}`. Such a + matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde. + + Arguments: + x (Tensor): 1-D input tensor. + N (int, optional): Number of columns in the output. If N is not specified, + a square array is returned :math:`(N = len(x))`. + increasing (bool, optional): Order of the powers of the columns. If True, + the powers increase from left to right, if False (the default) they are reversed. + + Returns: + Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{(N-1)}`, + the second :math:`x^{(N-2)}` and so forth. If increasing is True, the columns + are :math:`x^0, x^1, ..., x^{(N-1)}`. + + Example:: + + >>> x = torch.tensor([1, 2, 3, 5]) + >>> torch.vander(x) + tensor([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> torch.vander(x, N=3) + tensor([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + >>> torch.vander(x, N=3, increasing=True) + tensor([[ 1, 1, 1], + [ 1, 2, 4], + [ 1, 3, 9], + [ 1, 5, 25]]) + """ + ... 
+@overload +def var(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. 
versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var(input: Tensor, unbiased: _bool = True) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False, out: Optional[Tensor] = None) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. 
+ + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False, *, out: Optional[Tensor] = None) -> Tensor: + r""" + var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + + Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` + can be a single dimension, list of dimensions, or ``None`` to reduce over all + dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... 
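Every ``torch.var`` overload above restates the same variance formula; a minimal sketch (assuming PyTorch 2.0 or later, where ``correction`` replaced the boolean ``unbiased``) ties the ``correction`` keyword back to that formula and previews ``torch.var_mean``, documented next::

    import torch

    a = torch.tensor([1.0, 2.0, 4.0, 7.0])
    n = a.numel()
    ssd = ((a - a.mean()) ** 2).sum()                                  # sum of squared deviations
    assert torch.allclose(torch.var(a, correction=1), ssd / (n - 1))   # Bessel-corrected (the default)
    assert torch.allclose(torch.var(a, correction=0), ssd / n)         # population variance
    var, mean = torch.var_mean(a)                                      # variance and mean in one call
    assert torch.allclose(var, torch.var(a)) and torch.allclose(mean, a.mean())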
+@overload +def var_mean(input: Tensor, dim: Optional[Union[_int, _size]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var_mean(input: Tensor, dim: Optional[Union[_int, _size]] = None, *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. 
+ + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var_mean(input: Tensor, unbiased: _bool = True) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], *, correction: Optional[Union[Number, _complex]] = None, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. 
+ :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. + out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +@overload +def var_mean(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], unbiased: _bool = True, keepdim: _bool = False) -> Tuple[Tensor, Tensor]: + r""" + var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + + Calculates the variance and mean over the dimensions specified by :attr:`dim`. + :attr:`dim` can be a single dimension, list of dimensions, or ``None`` to + reduce over all dimensions. + + The variance (:math:`\sigma^2`) is calculated as + + .. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + + where :math:`x` is the sample set of elements, :math:`\bar{x}` is the + sample mean, :math:`N` is the number of samples and :math:`\delta N` is + the :attr:`correction`. + + + + If :attr:`keepdim` is ``True``, the output tensor is of the same size + as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. + Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the + output tensor having 1 (or ``len(dim)``) fewer dimension(s). + + + Args: + input (Tensor): the input tensor. + + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. + + + Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. 
+ out (Tensor, optional): the output tensor. + + Returns: + A tuple (var, mean) containing the variance and mean. + + Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + + .. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + """ + ... +def vdot(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + vdot(input, other, *, out=None) -> Tensor + + Computes the dot product of two 1D vectors along a dimension. + + In symbols, this function computes + + .. math:: + + \sum_{i=1}^n \overline{x_i}y_i. + + where :math:`\overline{x_i}` denotes the conjugate for complex + vectors, and it is the identity for real vectors. + + .. note:: + + Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product + of two 1D tensors with the same number of elements. + + .. seealso:: + + :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension. + + Args: + input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex. + other (Tensor): second tensor in the dot product, must be 1D. + + Keyword args: + + .. note:: out (Tensor, optional): the output tensor. + + + Example:: + + >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1])) + tensor(7) + >>> a = torch.tensor((1 +2j, 3 - 1j)) + >>> b = torch.tensor((2 +1j, 4 - 0j)) + >>> torch.vdot(a, b) + tensor([16.+1.j]) + >>> torch.vdot(b, a) + tensor([16.-1.j]) + """ + ... +def view_as_complex(input: Tensor) -> Tensor: + r""" + view_as_complex(input) -> Tensor + + Returns a view of :attr:`input` as a complex tensor. For an input complex + tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a + new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last + dimension of the input tensor is expected to represent the real and imaginary + components of complex numbers. + + .. warning:: + :func:`view_as_complex` is only supported for tensors with + :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is + expected to have the last dimension of :attr:`size` 2. In addition, the + tensor must have a `stride` of 1 for its last dimension. The strides of all + other dimensions must be even numbers. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x=torch.randn(4, 2) + >>> x + tensor([[ 1.6116, -0.5772], + [-1.4606, -0.9120], + [ 0.0786, -1.7497], + [-0.6561, -1.6623]]) + >>> torch.view_as_complex(x) + tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)]) + """ + ... +def view_as_complex_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.view_as_complex`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +def view_as_real(input: Tensor) -> Tensor: + r""" + view_as_real(input) -> Tensor + + Returns a view of :attr:`input` as a real tensor. For an input complex tensor of + :attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new + real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2 + represents the real and imaginary components of complex numbers. + + .. 
warning:: + :func:`view_as_real` is only supported for tensors with ``complex dtypes``. + + Args: + input (Tensor): the input tensor. + + Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)]) + >>> torch.view_as_real(x) + tensor([[ 0.4737, -0.3839], + [-0.2098, -0.6699], + [ 0.3470, -0.9451], + [-0.5174, -1.3136]]) + """ + ... +def view_as_real_copy(input: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.view_as_real`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def view_copy(input: Tensor, dtype: _dtype, *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.view`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def view_copy(input: Tensor, size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + Performs the same operation as :func:`torch.view`, but all output tensors + are freshly created instead of aliasing the input. + """ + ... +@overload +def vsplit(input: Tensor, sections: _int) -> Tuple[Tensor, ...]: + r""" + vsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors + vertically according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0) + (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer + it must evenly divide the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.vsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + + Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.vsplit(t, 2) + (tensor([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + tensor([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])) + >>> torch.vsplit(t, [3, 6]) + (tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + tensor([[12., 13., 14., 15.]]), + tensor([], size=(0, 4))) + """ + ... +@overload +def vsplit(input: Tensor, indices: _size) -> Tuple[Tensor, ...]: + r""" + vsplit(input, indices_or_sections) -> List of Tensors + + Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors + vertically according to :attr:`indices_or_sections`. Each split is a view of + :attr:`input`. + + This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0) + (the split dimension is 0), except that if :attr:`indices_or_sections` is an integer + it must evenly divide the split dimension or a runtime error will be thrown. + + This function is based on NumPy's :func:`numpy.vsplit`. + + Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. 
+ + Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.vsplit(t, 2) + (tensor([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + tensor([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])) + >>> torch.vsplit(t, [3, 6]) + (tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + tensor([[12., 13., 14., 15.]]), + tensor([], size=(0, 4))) + """ + ... +def vstack(tensors: Union[Tuple[Tensor, ...], List[Tensor]], *, out: Optional[Tensor] = None) -> Tensor: + r""" + vstack(tensors, *, out=None) -> Tensor + + Stack tensors in sequence vertically (row wise). + + This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`. + + Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + + Keyword args: + out (Tensor, optional): the output tensor. + + Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.vstack((a,b)) + tensor([[1, 2, 3], + [4, 5, 6]]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.vstack((a,b)) + tensor([[1], + [2], + [3], + [4], + [5], + [6]]) + """ + ... +@overload +def where(condition: Tensor) -> Tuple[Tensor, ...]: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... +@overload +def where(condition: Tensor, input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. 
math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... +@overload +def where(condition: Tensor, self: Union[Number, _complex], other: Tensor) -> Tensor: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... 
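The five ``where`` overloads above cover every call pattern the runtime accepts: two tensor operands, a Python scalar on either side, both operands as scalars, and the single-argument form that is shorthand for ``torch.nonzero(condition, as_tuple=True)``. A minimal illustrative sketch (an editorial example, not part of the stub file; the values are arbitrary) of which overload each call exercises::

    import torch

    cond = torch.tensor([[True, False], [False, True]])
    a = torch.ones(2, 2)
    b = torch.zeros(2, 2)

    torch.where(cond, a, b)      # (Tensor, Tensor, Tensor) overload
    torch.where(cond, a, -1.0)   # scalar ``other`` broadcast against ``a``
    torch.where(cond, 1.0, b)    # scalar ``input`` with tensor ``other``
    torch.where(cond, 1.0, 0.0)  # both operands as scalars
    torch.where(cond)            # same as torch.nonzero(cond, as_tuple=True)

All three arguments are broadcast together, so the scalar forms simply stand in for a tensor filled with that value.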
+@overload +def where(condition: Tensor, input: Tensor, other: Union[Number, _complex]) -> Tensor: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... +@overload +def where(condition: Tensor, self: Union[Number, _complex], other: Union[Number, _complex]) -> Tensor: + r""" + where(condition, input, other, *, out=None) -> Tensor + + Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + + The operation is defined as: + + .. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} + + .. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + + Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + + Keyword args: + out (Tensor, optional): the output tensor. + + Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + + Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) 
+ tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + + .. function:: where(condition) -> tuple of LongTensor + :noindex: + + ``torch.where(condition)`` is identical to + ``torch.nonzero(condition, as_tuple=True)``. + + .. note:: + See also :func:`torch.nonzero`. + """ + ... +@overload +def xlogy(input: Tensor, other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + xlogy(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.special.xlogy`. + """ + ... +@overload +def xlogy(self: Union[Number, _complex], other: Tensor, *, out: Optional[Tensor] = None) -> Tensor: + r""" + xlogy(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.special.xlogy`. + """ + ... +@overload +def xlogy(input: Tensor, other: Union[Number, _complex], *, out: Optional[Tensor] = None) -> Tensor: + r""" + xlogy(input, other, *, out=None) -> Tensor + + Alias for :func:`torch.special.xlogy`. + """ + ... +@overload +def xlogy_(input: Tensor, other: Tensor) -> Tensor: ... +@overload +def xlogy_(input: Tensor, other: Union[Number, _complex]) -> Tensor: ... +def zero_(input: Tensor) -> Tensor: ... +@overload +def zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) + """ + ... +@overload +def zeros(*size: _int, out: Optional[Tensor] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. 
+ dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) + """ + ... +@overload +def zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) + """ + ... +@overload +def zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the shape defined + by the variable argument :attr:`size`. + + Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + + Keyword args: + out (Tensor, optional): the output tensor. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. 
+ device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + + Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) + """ + ... +def zeros_like(input: Tensor, *, memory_format: Optional[memory_format] = None, dtype: Optional[_dtype] = None, layout: Optional[_layout] = None, device: Optional[Optional[DeviceLikeType]] = None, pin_memory: Optional[_bool] = False, requires_grad: Optional[_bool] = False) -> Tensor: + r""" + zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + + Returns a tensor filled with the scalar value `0`, with the same size as + :attr:`input`. ``torch.zeros_like(input)`` is equivalent to + ``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + + .. warning:: + As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, + the old ``torch.zeros_like(input, out=output)`` is equivalent to + ``torch.zeros(input.size(), out=output)``. + + Args: + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + + Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. + + Example:: + + >>> input = torch.empty(2, 3) + >>> torch.zeros_like(input) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + """ + ... 
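The ``zeros`` overloads above differ only in how the shape is supplied (variadic integers, a sequence, or the named-dimension variants), while ``zeros_like`` takes its size, dtype, layout, and device from an existing tensor. A short illustrative sketch (an editorial example, not part of the stub file) of the equivalent call forms::

    import torch

    torch.zeros(2, 3)                     # varargs form
    torch.zeros((2, 3))                   # sequence form, same result
    torch.zeros(2, 3, dtype=torch.int64)  # keyword-only dtype override
    x = torch.empty(2, 3)
    torch.zeros_like(x)                   # matches x's size, dtype, layout, device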
+ +__all__ = ['__and__', '__lshift__', '__or__', '__rshift__', '__xor__', '_adaptive_avg_pool2d', + '_adaptive_avg_pool3d', '_add_batch_dim', '_add_relu', '_add_relu_', '_addmm_activation', + '_aminmax', '_amp_foreach_non_finite_check_and_unscale_', '_amp_update_scale_', '_assert_async', + '_assert_scalar', '_assert_tensor_metadata', '_batch_norm_impl_index', '_cast_Byte', '_cast_Char', + '_cast_Double', '_cast_Float', '_cast_Half', '_cast_Int', '_cast_Long', '_cast_Short', + '_choose_qparams_per_tensor', '_chunk_cat', '_coalesce', '_compute_linear_combination', '_conj', + '_conj_copy', '_conj_physical', '_convert_indices_from_coo_to_csr', + '_convert_indices_from_csr_to_coo', '_convert_weight_to_int4pack', '_convolution', + '_convolution_mode', '_copy_from', '_copy_from_and_resize', '_cslt_compress', '_cslt_sparse_mm', + '_cslt_sparse_mm_search', '_ctc_loss', '_cudnn_ctc_loss', '_cudnn_init_dropout_state', + '_cudnn_rnn', '_cudnn_rnn_flatten_weight', '_cufft_clear_plan_cache', + '_cufft_get_plan_cache_max_size', '_cufft_get_plan_cache_size', '_cufft_set_plan_cache_max_size', + '_cummax_helper', '_cummin_helper', '_debug_has_internal_overlap', '_dim_arange', + '_dirichlet_grad', '_disable_functionalization', '_efficientzerotensor', '_embedding_bag', + '_embedding_bag_forward_only', '_empty_affine_quantized', '_empty_per_channel_affine_quantized', + '_enable_functionalization', '_euclidean_dist', '_fake_quantize_learnable_per_channel_affine', + '_fake_quantize_learnable_per_tensor_affine', + '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', + '_fake_quantize_per_tensor_affine_cachemask_tensor_qparams', '_fft_c2c', '_fft_c2r', '_fft_r2c', + '_fill_mem_eff_dropout_mask_', '_foobar', '_foreach_abs', '_foreach_abs_', '_foreach_acos', + '_foreach_acos_', '_foreach_add', '_foreach_add_', '_foreach_addcdiv', '_foreach_addcdiv_', + '_foreach_addcmul', '_foreach_addcmul_', '_foreach_asin', '_foreach_asin_', '_foreach_atan', + '_foreach_atan_', '_foreach_ceil', '_foreach_ceil_', '_foreach_clamp_max', '_foreach_clamp_max_', + '_foreach_clamp_min', '_foreach_clamp_min_', '_foreach_copy_', '_foreach_cos', '_foreach_cos_', + '_foreach_cosh', '_foreach_cosh_', '_foreach_div', '_foreach_div_', '_foreach_erf', + '_foreach_erf_', '_foreach_erfc', '_foreach_erfc_', '_foreach_exp', '_foreach_exp_', + '_foreach_expm1', '_foreach_expm1_', '_foreach_floor', '_foreach_floor_', '_foreach_frac', + '_foreach_frac_', '_foreach_lerp', '_foreach_lerp_', '_foreach_lgamma', '_foreach_lgamma_', + '_foreach_log', '_foreach_log10', '_foreach_log10_', '_foreach_log1p', '_foreach_log1p_', + '_foreach_log2', '_foreach_log2_', '_foreach_log_', '_foreach_maximum', '_foreach_maximum_', + '_foreach_minimum', '_foreach_minimum_', '_foreach_mul', '_foreach_mul_', '_foreach_neg', + '_foreach_neg_', '_foreach_norm', '_foreach_pow', '_foreach_pow_', '_foreach_reciprocal', + '_foreach_reciprocal_', '_foreach_round', '_foreach_round_', '_foreach_sigmoid', + '_foreach_sigmoid_', '_foreach_sign', '_foreach_sign_', '_foreach_sin', '_foreach_sin_', + '_foreach_sinh', '_foreach_sinh_', '_foreach_sqrt', '_foreach_sqrt_', '_foreach_sub', + '_foreach_sub_', '_foreach_tan', '_foreach_tan_', '_foreach_tanh', '_foreach_tanh_', + '_foreach_trunc', '_foreach_trunc_', '_foreach_zero_', '_from_functional_tensor', + '_functional_assert_async', '_functional_assert_scalar', '_functional_sym_constrain_range', + '_functional_sym_constrain_range_for_size', + '_functionalize_are_all_mutations_hidden_from_autograd', + 
'_functionalize_are_all_mutations_under_no_grad_or_inference_mode', '_functionalize_commit_update', + '_functionalize_mark_mutation_hidden_from_autograd', '_functionalize_replace', + '_functionalize_sync', '_fused_adam_', '_fused_adamw_', '_fused_dropout', + '_fused_moving_avg_obs_fq_helper', '_fused_moving_avg_obs_fq_helper', '_fused_sdp_choice', + '_fused_sgd_', '_fw_primal_copy', '_grid_sampler_2d_cpu_fallback', + '_has_compatible_shallow_copy_type', '_histogramdd_bin_edges', '_histogramdd_from_bin_cts', + '_histogramdd_from_bin_tensors', '_index_put_impl_', '_indices_copy', '_int_mm', '_is_all_true', + '_is_any_true', '_is_functional_tensor', '_is_zerotensor', '_lazy_clone', '_linalg_check_errors', + '_linalg_det', '_linalg_det', '_linalg_eigh', '_linalg_eigh', '_linalg_slogdet', '_linalg_slogdet', + '_linalg_solve_ex', '_linalg_solve_ex', '_linalg_svd', '_linalg_svd', '_log_softmax', + '_log_softmax_backward_data', '_logcumsumexp', '_lstm_mps', '_lu_with_info', '_lu_with_info', + '_make_dep_token', '_make_dual', '_make_dual_copy', '_make_per_channel_quantized_tensor', + '_make_per_tensor_quantized_tensor', '_masked_scale', '_masked_softmax', '_mixed_dtypes_linear', + '_mkldnn_reshape', '_mkldnn_transpose', '_mkldnn_transpose_', '_mps_convolution', + '_mps_convolution_transpose', '_native_batch_norm_legit', '_native_batch_norm_legit_no_training', + '_native_multi_head_attention', '_neg_view', '_neg_view_copy', '_nested_from_padded', + '_nested_from_padded_and_nested_example', '_nested_get_jagged_dummy', '_nested_get_lengths', + '_nested_get_offsets', '_nested_get_ragged_idx', '_nested_get_values', '_nested_get_values_copy', + '_nested_tensor_from_mask', '_nested_tensor_from_mask_left_aligned', + '_nested_tensor_from_tensor_list', '_nested_tensor_softmax_with_shape', '_nested_view_from_buffer', + '_nested_view_from_buffer_copy', '_nested_view_from_jagged', '_nested_view_from_jagged_copy', + '_nnpack_available', '_nnpack_spatial_convolution', '_pack_padded_sequence', + '_pad_packed_sequence', '_pin_memory', '_prelu_kernel', '_print', '_propagate_xla_data', + '_remove_batch_dim', '_reshape_alias_copy', '_reshape_from_tensor', '_resize_output_', + '_rowwise_prune', '_sample_dirichlet', '_saturate_weight_to_fp16', + '_scaled_dot_product_attention_math', '_scaled_dot_product_cudnn_attention', + '_scaled_dot_product_cudnn_attention', '_scaled_dot_product_efficient_attention', + '_scaled_dot_product_efficient_attention', '_scaled_dot_product_flash_attention', + '_scaled_dot_product_flash_attention', '_scaled_dot_product_flash_attention_for_cpu', + '_scaled_dot_product_flash_attention_for_cpu', '_scaled_mm', '_shape_as_tensor', + '_sobol_engine_draw', '_sobol_engine_ff_', '_sobol_engine_initialize_state_', + '_sobol_engine_scramble_', '_softmax', '_softmax_backward_data', '_sparse_broadcast_to', + '_sparse_broadcast_to_copy', '_sparse_csr_prod', '_sparse_csr_sum', + '_sparse_log_softmax_backward_data', '_sparse_semi_structured_linear', + '_sparse_softmax_backward_data', '_sparse_sparse_matmul', '_sparse_sum', '_stack', + '_standard_gamma', '_standard_gamma_grad', '_sync', '_test_autograd_multiple_dispatch', + '_test_autograd_multiple_dispatch_view', '_test_autograd_multiple_dispatch_view_copy', + '_test_check_tensor', '_test_functorch_fallback', '_test_parallel_materialize', + '_test_serialization_subcmul', '_to_cpu', '_to_functional_tensor', '_to_sparse_semi_structured', + '_transform_bias_rescale_qkv', '_transformer_encoder_layer_fwd', '_trilinear', + '_triton_multi_head_attention', 
'_triton_scaled_dot_attention', '_unique', '_unique2', + '_unpack_dual', '_unpack_dual', '_unsafe_index', '_unsafe_index_put', '_use_cudnn_ctc_loss', + '_use_cudnn_rnn_flatten_weight', '_validate_compressed_sparse_indices', + '_validate_sparse_bsc_tensor_args', '_validate_sparse_bsr_tensor_args', + '_validate_sparse_compressed_tensor_args', '_validate_sparse_coo_tensor_args', + '_validate_sparse_csc_tensor_args', '_validate_sparse_csr_tensor_args', '_values_copy', + '_weight_int4pack_mm', '_weight_int8pack_mm', '_weight_norm', '_weight_norm_interface', 'abs', + 'abs_', 'absolute', 'acos', 'acos_', 'acosh', 'acosh_', 'adaptive_avg_pool1d', + 'adaptive_max_pool1d', 'add', 'addbmm', 'addcdiv', 'addcmul', 'addmm', 'addmv', 'addmv_', 'addr', + 'adjoint', 'affine_grid_generator', 'alias_copy', 'all', 'allclose', 'alpha_dropout', + 'alpha_dropout_', 'amax', 'amin', 'aminmax', 'aminmax', 'angle', 'any', 'arange', 'arccos', + 'arccos_', 'arccosh', 'arccosh_', 'arcsin', 'arcsin_', 'arcsinh', 'arcsinh_', 'arctan', 'arctan2', + 'arctan_', 'arctanh', 'arctanh_', 'argmax', 'argmin', 'argsort', 'argwhere', 'as_strided', + 'as_strided_', 'as_strided_copy', 'as_strided_scatter', 'as_tensor', 'asarray', 'asin', 'asin_', + 'asinh', 'asinh_', 'atan', 'atan2', 'atan_', 'atanh', 'atanh_', 'avg_pool1d', 'baddbmm', + 'bartlett_window', 'batch_norm', 'batch_norm_backward_elemt', 'batch_norm_backward_reduce', + 'batch_norm_elemt', 'batch_norm_gather_stats', 'batch_norm_gather_stats_with_counts', + 'batch_norm_stats', 'batch_norm_update_stats', 'bernoulli', 'bilinear', + 'binary_cross_entropy_with_logits', 'bincount', 'binomial', 'bitwise_and', 'bitwise_left_shift', + 'bitwise_not', 'bitwise_or', 'bitwise_right_shift', 'bitwise_xor', 'blackman_window', 'bmm', + 'broadcast_to', 'bucketize', 'can_cast', 'cat', 'ccol_indices_copy', 'ceil', 'ceil_', 'celu', + 'celu_', 'channel_shuffle', 'cholesky', 'cholesky_inverse', 'cholesky_solve', + 'choose_qparams_optimized', 'chunk', 'clamp', 'clamp_', 'clamp_max', 'clamp_max_', 'clamp_min', + 'clamp_min_', 'clip', 'clip_', 'clone', 'col_indices_copy', 'column_stack', 'combinations', + 'complex', 'concat', 'concatenate', 'conj', 'conj_physical', 'conj_physical_', 'constant_pad_nd', + 'conv1d', 'conv2d', 'conv3d', 'conv_tbc', 'conv_transpose1d', 'conv_transpose2d', + 'conv_transpose3d', 'convolution', 'copysign', 'corrcoef', 'cos', 'cos_', 'cosh', 'cosh_', + 'cosine_embedding_loss', 'cosine_similarity', 'count_nonzero', 'cov', 'cross', 'crow_indices_copy', + 'ctc_loss', 'cudnn_affine_grid_generator', 'cudnn_batch_norm', 'cudnn_convolution', + 'cudnn_convolution_add_relu', 'cudnn_convolution_relu', 'cudnn_convolution_transpose', + 'cudnn_grid_sampler', 'cudnn_is_acceptable', 'cummax', 'cummax', 'cummin', 'cummin', 'cumprod', + 'cumsum', 'cumulative_trapezoid', 'deg2rad', 'deg2rad_', 'dequantize', 'det', 'detach', 'detach_', + 'detach_copy', 'diag', 'diag_embed', 'diagflat', 'diagonal', 'diagonal_copy', 'diagonal_scatter', + 'diff', 'digamma', 'dist', 'div', 'divide', 'dot', 'dropout', 'dropout_', 'dsmm', 'dsplit', + 'dstack', 'embedding', 'embedding_bag', 'embedding_renorm_', 'empty', 'empty_like', + 'empty_permuted', 'empty_quantized', 'empty_strided', 'eq', 'equal', 'erf', 'erf_', 'erfc', + 'erfc_', 'erfinv', 'exp', 'exp2', 'exp2_', 'exp_', 'expand_copy', 'expm1', 'expm1_', 'eye', + 'fake_quantize_per_channel_affine', 'fake_quantize_per_tensor_affine', 'fbgemm_linear_fp16_weight', + 'fbgemm_linear_fp16_weight_fp32_activation', 'fbgemm_linear_int8_weight', + 
'fbgemm_linear_int8_weight_fp32_activation', 'fbgemm_linear_quantize_weight', + 'fbgemm_pack_gemm_matrix_fp16', 'fbgemm_pack_quantized_matrix', 'feature_alpha_dropout', + 'feature_alpha_dropout_', 'feature_dropout', 'feature_dropout_', 'fill', 'fill_', 'fix', 'fix_', + 'flatten', 'flip', 'fliplr', 'flipud', 'float_power', 'floor', 'floor_', 'floor_divide', 'fmax', + 'fmin', 'fmod', 'frac', 'frac_', 'frexp', 'frexp', 'frobenius_norm', 'from_file', 'from_numpy', + 'frombuffer', 'full', 'full_like', 'fused_moving_avg_obs_fake_quant', 'gather', 'gcd', 'gcd_', + 'ge', 'geqrf', 'geqrf', 'ger', 'get_default_dtype', 'get_num_interop_threads', 'get_num_threads', + 'gradient', 'greater', 'greater_equal', 'grid_sampler', 'grid_sampler_2d', 'grid_sampler_3d', + 'group_norm', 'gru', 'gru_cell', 'gt', 'hamming_window', 'hann_window', 'hardshrink', 'heaviside', + 'hinge_embedding_loss', 'histc', 'histogram', 'histogram', 'histogramdd', 'histogramdd', 'hsmm', + 'hsplit', 'hspmm', 'hstack', 'hypot', 'i0', 'i0_', 'igamma', 'igammac', 'imag', 'index_add', + 'index_copy', 'index_fill', 'index_put', 'index_put_', 'index_reduce', 'index_select', + 'indices_copy', 'init_num_threads', 'inner', 'instance_norm', 'int_repr', 'inverse', 'is_complex', + 'is_conj', 'is_distributed', 'is_floating_point', 'is_grad_enabled', 'is_inference', + 'is_inference_mode_enabled', 'is_neg', 'is_nonzero', 'is_same_size', 'is_signed', + 'is_vulkan_available', 'isclose', 'isfinite', 'isin', 'isinf', 'isnan', 'isneginf', 'isposinf', + 'isreal', 'istft', 'kaiser_window', 'kl_div', 'kron', 'kthvalue', 'kthvalue', 'layer_norm', 'lcm', + 'lcm_', 'ldexp', 'ldexp_', 'le', 'lerp', 'less', 'less_equal', 'lgamma', 'linspace', 'log', + 'log10', 'log10_', 'log1p', 'log1p_', 'log2', 'log2_', 'log_', 'log_softmax', 'logaddexp', + 'logaddexp2', 'logcumsumexp', 'logdet', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', + 'logit', 'logit_', 'logspace', 'logsumexp', 'lstm', 'lstm_cell', 'lt', 'lu_solve', 'lu_unpack', + 'lu_unpack', 'margin_ranking_loss', 'masked_fill', 'masked_scatter', 'masked_select', 'matmul', + 'matrix_exp', 'matrix_power', 'max', 'max', 'max_pool1d', 'max_pool1d_with_indices', 'max_pool2d', + 'max_pool3d', 'maximum', 'mean', 'median', 'median', 'min', 'min', 'minimum', 'miopen_batch_norm', + 'miopen_convolution', 'miopen_convolution_add_relu', 'miopen_convolution_relu', + 'miopen_convolution_transpose', 'miopen_depthwise_convolution', 'miopen_rnn', + 'mkldnn_adaptive_avg_pool2d', 'mkldnn_convolution', 'mkldnn_linear_backward_weights', + 'mkldnn_max_pool2d', 'mkldnn_max_pool3d', 'mkldnn_rnn_layer', 'mm', 'mode', 'mode', 'moveaxis', + 'movedim', 'msort', 'mul', 'multinomial', 'multiply', 'mv', 'mvlgamma', 'nan_to_num', + 'nan_to_num_', 'nanmean', 'nanmedian', 'nanmedian', 'nanquantile', 'nansum', 'narrow', + 'narrow_copy', 'native_batch_norm', 'native_channel_shuffle', 'native_dropout', + 'native_group_norm', 'native_layer_norm', 'native_norm', 'ne', 'neg', 'neg_', 'negative', + 'negative_', 'nextafter', 'nonzero', 'nonzero_static', 'norm_except_dim', 'normal', 'not_equal', + 'nuclear_norm', 'numel', 'ones', 'ones_like', 'orgqr', 'ormqr', 'outer', 'pairwise_distance', + 'pdist', 'permute', 'permute_copy', 'pinverse', 'pixel_shuffle', 'pixel_unshuffle', 'poisson', + 'poisson_nll_loss', 'polar', 'polygamma', 'positive', 'pow', 'prelu', 'prod', 'promote_types', + 'put', 'q_per_channel_axis', 'q_per_channel_scales', 'q_per_channel_zero_points', 'q_scale', + 'q_zero_point', 'qr', 'qr', 'quantile', 'quantize_per_channel', 
'quantize_per_tensor', + 'quantize_per_tensor_dynamic', 'quantized_batch_norm', 'quantized_gru_cell', 'quantized_lstm_cell', + 'quantized_max_pool1d', 'quantized_max_pool2d', 'quantized_max_pool3d', 'quantized_rnn_relu_cell', + 'quantized_rnn_tanh_cell', 'rad2deg', 'rad2deg_', 'rand', 'rand_like', 'randint', 'randint_like', + 'randn', 'randn_like', 'randperm', 'range', 'ravel', 'real', 'reciprocal', 'reciprocal_', 'relu', + 'relu_', 'remainder', 'renorm', 'repeat_interleave', 'reshape', 'resize_as_', 'resize_as_sparse_', + 'resolve_conj', 'resolve_neg', 'result_type', 'rnn_relu', 'rnn_relu_cell', 'rnn_tanh', + 'rnn_tanh_cell', 'roll', 'rot90', 'round', 'round_', 'row_indices_copy', 'row_stack', 'rrelu', + 'rrelu_', 'rsqrt', 'rsqrt_', 'rsub', 'saddmm', 'scalar_tensor', 'scatter', 'scatter_add', + 'scatter_reduce', 'searchsorted', 'segment_reduce', 'select', 'select_copy', 'select_scatter', + 'selu', 'selu_', 'set_flush_denormal', 'set_num_interop_threads', 'set_num_threads', 'sgn', + 'sigmoid', 'sigmoid_', 'sign', 'signbit', 'sin', 'sin_', 'sinc', 'sinc_', 'sinh', 'sinh_', + 'slice_copy', 'slice_inverse', 'slice_scatter', 'slogdet', 'slogdet', 'smm', 'softmax', 'sort', + 'sort', 'sparse_bsc_tensor', 'sparse_bsr_tensor', 'sparse_compressed_tensor', 'sparse_coo_tensor', + 'sparse_csc_tensor', 'sparse_csr_tensor', 'split_copy', 'split_with_sizes', + 'split_with_sizes_copy', 'spmm', 'sqrt', 'sqrt_', 'square', 'square_', 'squeeze', 'squeeze_copy', + 'sspaddmm', 'stack', 'std', 'std_mean', 'sub', 'subtract', 'sum', 'svd', 'svd', 'swapaxes', + 'swapdims', 'sym_constrain_range', 'sym_constrain_range_for_size', 't', 't_copy', 'take', + 'take_along_dim', 'tan', 'tan_', 'tanh', 'tanh_', 'tensor', 'tensor_split', 'threshold', + 'threshold_', 'tile', 'topk', 'topk', 'trace', 'transpose', 'transpose_copy', 'trapezoid', 'trapz', + 'triangular_solve', 'triangular_solve', 'tril', 'tril_indices', 'triplet_margin_loss', 'triu', + 'triu_indices', 'true_divide', 'trunc', 'trunc_', 'unbind', 'unbind_copy', 'unflatten', + 'unfold_copy', 'unique_dim', 'unsafe_chunk', 'unsafe_split', 'unsafe_split_with_sizes', + 'unsqueeze', 'unsqueeze_copy', 'values_copy', 'vander', 'var', 'var_mean', 'vdot', + 'view_as_complex', 'view_as_complex_copy', 'view_as_real', 'view_as_real_copy', 'view_copy', + 'vsplit', 'vstack', 'where', 'xlogy', 'xlogy_', 'zero_', 'zeros', 'zeros_like'] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/__config__.py b/llmeval-env/lib/python3.10/site-packages/torch/__config__.py new file mode 100644 index 0000000000000000000000000000000000000000..f7e3e209654a8846ddc42d31220101340043c276 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/__config__.py @@ -0,0 +1,22 @@ +import torch + + +def show(): + """ + Return a human-readable string with descriptions of the + configuration of PyTorch. + """ + return torch._C._show_config() + + +# TODO: In principle, we could provide more structured version/config +# information here. For now only CXX_FLAGS is exposed, as Timer +# uses them. 
+def _cxx_flags(): + """Returns the CXX_FLAGS used when building PyTorch.""" + return torch._C._cxx_flags() + + +def parallel_info(): + r"""Returns detailed string with parallelization settings""" + return torch._C._parallel_info() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/__future__.py b/llmeval-env/lib/python3.10/site-packages/torch/__future__.py new file mode 100644 index 0000000000000000000000000000000000000000..f172ee3c8fe223aa316667f37f356e5b6658d20e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/__future__.py @@ -0,0 +1,75 @@ +_overwrite_module_params_on_conversion: bool = False +_swap_module_params_on_conversion: bool = False + + +def set_overwrite_module_params_on_conversion(value: bool) -> None: + """ + Sets whether to assign new tensors to the parameters instead of changing the + existing parameters in-place when converting an ``nn.Module``. + + When enabled, the following methods will assign new parameters to the module: + + #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices + #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype + #. :meth:`nn.Module.to` + #. :meth:`nn.Module.to_empty` + + Args: + value (bool): Whether to assign new tensors or not. + + """ + global _overwrite_module_params_on_conversion + _overwrite_module_params_on_conversion = value + + +def get_overwrite_module_params_on_conversion() -> bool: + """ + Returns whether to assign new tensors to the parameters instead of changing the + existing parameters in-place when converting an :class:`torch.nn.Module`. Defaults to ``False``. + + See :func:`~torch.__future__.set_overwrite_module_params_on_conversion` for more information. + """ + return _overwrite_module_params_on_conversion + + +def set_swap_module_params_on_conversion(value: bool) -> None: + """ + Sets whether to use :func:`~torch.utils.swap_tensors` instead of setting ``.data`` to + change the existing parameters in-place when converting an ``nn.Module`` and instead + of ``param.copy_(state_dict[key])`` when loading a state dict into an ``nn.Module``. + + .. note:: + This function takes precedence over :func:`~torch.__future__.get_overwrite_module_params_on_conversion` + + When enabled, the following methods will swap the existing parameters in-place: + + #. ``module.{device}()`` (e.g. :meth:`nn.Module.cuda()`) for moving a module between devices + #. ``module.{dtype}()`` (e.g. :meth:`nn.Module.float()`) for converting a module to a different dtype + #. :meth:`nn.Module.to` + #. :meth:`nn.Module.to_empty` + #. :meth:`nn.Module.load_state_dict` + + The semantics for :meth:`~nn.Module.load_state_dict` when this is set are as follows: + + #. For each parameter/buffer, its corresponding ``state_dict['key']`` is transformed via + :meth:`~torch.Tensor.module_load` (i.e. ``res = param.module_load(state_dict['key'])``) + #. If necessary, ``res`` will be wrapped in an :class:`~nn.Parameter` + #. The parameter/buffer in the module will be swapped via :func:`~torch.utils.swap_tensors` + with ``res`` + + Args: + value (bool): Whether to use :func:`~torch.utils.swap_tensors` or not. + + """ + global _swap_module_params_on_conversion + _swap_module_params_on_conversion = value + + +def get_swap_module_params_on_conversion() -> bool: + """ + Returns whether to use :func:`~torch.utils.swap_tensors` instead of setting .data to + change the existing parameters in-place when converting an ``nn.Module``. Defaults to ``False``. 
+ + See :func:`~torch.__future__.set_swap_module_params_on_conversion` for more information. + """ + return _swap_module_params_on_conversion diff --git a/llmeval-env/lib/python3.10/site-packages/torch/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d381712b4a356e4ad2c066c3b87abead312f4b81 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/__init__.py @@ -0,0 +1,2038 @@ + +r""" +The torch package contains data structures for multi-dimensional +tensors and defines mathematical operations over these tensors. +Additionally, it provides many utilities for efficient serialization of +Tensors and arbitrary types, and other useful utilities. + +It has a CUDA counterpart, that enables you to run your tensor computations +on an NVIDIA GPU with compute capability >= 3.0. +""" + +import math +import os +import sys +import platform +import textwrap +import ctypes +import inspect +import threading + +# multipy/deploy is setting this import before importing torch, this is the most +# reliable way we have to detect if we're running within deploy. +# https://github.com/pytorch/multipy/blob/d60f34ad38c371e441fe7ffdb77a3c3dda5a5d19/multipy/runtime/interpreter/interpreter_impl.cpp#L134-L137 +def _running_with_deploy(): + return sys.modules.get("torch._meta_registrations", None) is object + +from ._utils import _import_dotted_name, classproperty +from ._utils import _functionalize_sync as _sync +from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \ + USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS + +# TODO(torch_deploy) figure out how to freeze version.py in fbcode build +if _running_with_deploy(): + __version__ = "torch-deploy-1.8" +else: + from .torch_version import __version__ as __version__ + +from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, TYPE_CHECKING, Union, List +import builtins + +__all__ = [ + 'typename', 'is_tensor', 'is_storage', + 'set_default_tensor_type', 'set_default_device', 'get_default_device', + 'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed', + 'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul', + 'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode', + 'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage', + 'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage', + 'TypedStorage', 'UntypedStorage', + 'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor', + 'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor', + 'lobpcg', 'use_deterministic_algorithms', + 'are_deterministic_algorithms_enabled', + 'is_deterministic_algorithms_warn_only_enabled', + 'set_deterministic_debug_mode', 'get_deterministic_debug_mode', + 'set_float32_matmul_precision', 'get_float32_matmul_precision', + 'set_warn_always', 'is_warn_always_enabled', 'SymInt', 'SymFloat', + 'SymBool', 'sym_not', 'unravel_index', + 'sym_int', 'sym_float', 'sym_max', 'sym_min', 'sym_ite', 'compile', 'vmap', + 'export', 'autocast', 'cond', 'GradScaler', +] + +################################################################################ +# Load the extension module +################################################################################ + +if sys.platform == 'win32': + pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files') + py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin') + th_dll_path = os.path.join(os.path.dirname(__file__), 'lib') + + # When users create a virtualenv 
that inherits the base environment, + # we will need to add the corresponding library directory into + # DLL search directories. Otherwise, it will rely on `PATH` which + # is dependent on user settings. + if sys.exec_prefix != sys.base_exec_prefix: + base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 'bin') + else: + base_py_dll_path = '' + + dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path])) + + if all(not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths): + nvtoolsext_dll_path = os.path.join( + os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA Corporation', 'NvToolsExt')), 'bin', 'x64') + else: + nvtoolsext_dll_path = '' + + from .version import cuda as cuda_version + import glob + if cuda_version and all(not glob.glob(os.path.join(p, 'cudart64*.dll')) for p in dll_paths): + cuda_version_1 = cuda_version.replace('.', '_') + cuda_path_var = 'CUDA_PATH_V' + cuda_version_1 + default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing Toolkit', 'CUDA', 'v' + cuda_version) + cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin') + else: + cuda_path = '' + + dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path])) + + kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True) + with_load_library_flags = hasattr(kernel32, 'AddDllDirectory') + prev_error_mode = kernel32.SetErrorMode(0x0001) + + kernel32.LoadLibraryW.restype = ctypes.c_void_p + if with_load_library_flags: + kernel32.LoadLibraryExW.restype = ctypes.c_void_p + + for dll_path in dll_paths: + os.add_dll_directory(dll_path) + + try: + ctypes.CDLL('vcruntime140.dll') + ctypes.CDLL('msvcp140.dll') + ctypes.CDLL('vcruntime140_1.dll') + except OSError: + print('''Microsoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure. + It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe''') + + dlls = glob.glob(os.path.join(th_dll_path, '*.dll')) + path_patched = False + for dll in dlls: + is_loaded = False + if with_load_library_flags: + res = kernel32.LoadLibraryExW(dll, None, 0x00001100) + last_error = ctypes.get_last_error() + if res is None and last_error != 126: + err = ctypes.WinError(last_error) + err.strerror += f' Error loading "{dll}" or one of its dependencies.' + raise err + elif res is not None: + is_loaded = True + if not is_loaded: + if not path_patched: + os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']]) + path_patched = True + res = kernel32.LoadLibraryW(dll) + if res is None: + err = ctypes.WinError(ctypes.get_last_error()) + err.strerror += f' Error loading "{dll}" or one of its dependencies.' 
+ raise err + + kernel32.SetErrorMode(prev_error_mode) + + +def _preload_cuda_deps(lib_folder, lib_name): + """Preloads cuda deps if they could not be found otherwise.""" + # Should only be called on Linux if default path resolution have failed + assert platform.system() == 'Linux', 'Should only be called on Linux' + import glob + lib_path = None + for path in sys.path: + nvidia_path = os.path.join(path, 'nvidia') + if not os.path.exists(nvidia_path): + continue + candidate_lib_paths = glob.glob(os.path.join(nvidia_path, lib_folder, 'lib', lib_name)) + if candidate_lib_paths and not lib_path: + lib_path = candidate_lib_paths[0] + if lib_path: + break + if not lib_path: + raise ValueError(f"{lib_name} not found in the system path {sys.path}") + ctypes.CDLL(lib_path) + + +# See Note [Global dependencies] +def _load_global_deps() -> None: + if _running_with_deploy() or platform.system() == 'Windows': + return + + lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so') + here = os.path.abspath(__file__) + lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name) + + try: + ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL) + except OSError as err: + # Can only happen for wheel with cuda libs as PYPI deps + # As PyTorch is not purelib, but nvidia-*-cu12 is + cuda_libs: Dict[str, str] = { + 'cublas': 'libcublas.so.*[0-9]', + 'cudnn': 'libcudnn.so.*[0-9]', + 'cuda_nvrtc': 'libnvrtc.so.*[0-9]', + 'cuda_runtime': 'libcudart.so.*[0-9]', + 'cuda_cupti': 'libcupti.so.*[0-9]', + 'cufft': 'libcufft.so.*[0-9]', + 'curand': 'libcurand.so.*[0-9]', + 'cusolver': 'libcusolver.so.*[0-9]', + 'cusparse': 'libcusparse.so.*[0-9]', + 'nccl': 'libnccl.so.*[0-9]', + 'nvtx': 'libnvToolsExt.so.*[0-9]', + } + is_cuda_lib_err = [lib for lib in cuda_libs.values() if lib.split('.')[0] in err.args[0]] + if not is_cuda_lib_err: + raise err + for lib_folder, lib_name in cuda_libs.items(): + _preload_cuda_deps(lib_folder, lib_name) + ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL) + + +if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \ + (_running_with_deploy() or platform.system() != 'Windows'): + # Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a + # few circumstances: + # + # 1. You're in a build environment (e.g., fbcode) where + # libtorch_global_deps is not available, but you still need + # to get mkl to link in with RTLD_GLOBAL or it will just + # not work. + # + # 2. You're trying to run PyTorch under UBSAN and you need + # to ensure that only one copy of libtorch is loaded, so + # vptr checks work properly + # + # If you're using this setting, you must verify that all the libraries + # you load consistently use the same libstdc++, or you may have + # mysterious segfaults. + # + old_flags = sys.getdlopenflags() + sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY) + from torch._C import * # noqa: F403 + sys.setdlopenflags(old_flags) + del old_flags + +else: + # Easy way. You want this most of the time, because it will prevent + # C++ symbols from libtorch clobbering C++ symbols from other + # libraries, leading to mysterious segfaults. 
+ # + # If building in an environment where libtorch_global_deps isn't available + # like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will + # want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False + # + # See Note [Global dependencies] + if USE_GLOBAL_DEPS: + _load_global_deps() + from torch._C import * # noqa: F403 + +# Appease the type checker; ordinarily this binding is inserted by the +# torch._C module initialization code in C +if TYPE_CHECKING: + from . import _C as _C + +class SymInt: + """ + Like an int (including magic methods), but redirects all operations on the + wrapped node. This is used in particular to symbolically record operations + in the symbolic shape workflow. + """ + + def __init__(self, node): + # This field MUST be named node; C++ binding code assumes that this + # class has a field named node that stores SymNode + self.node = node + + def __bool__(self): + return builtins.bool(self != 0) + + def __int__(self): + return self.node.int_() + + def __index__(self): + return self.node.int_() + + # Magic methods installed by torch.fx.experimental.sym_node + + def __eq__(self, other: object) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __lt__(self, other) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __gt__(self, other) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __le__(self, other) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __ge__(self, other) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __add__(self, other) -> "SymInt": + raise AssertionError("type stub not overridden") + + def __mul__(self, other) -> "SymInt": + raise AssertionError("type stub not overridden") + + def __sym_max__(self, other): + raise AssertionError("type stub not overridden") + + def __sym_min__(self, other): + raise AssertionError("type stub not overridden") + + def __sym_float__(self): + raise AssertionError("type stub not overridden") + + def __neg__(self): + raise AssertionError("type stub not overridden") + + def __repr__(self): + return str(self.node) + + def __hash__(self) -> builtins.int: + if self.node.is_nested_int(): + return hash(self.node.nested_int()) + else: + # We could support constant SymInts as well, but not doing it for now + raise TypeError("unhashable type: non-nested SymInt") + +class SymFloat: + """ + Like an float (including magic methods), but redirects all operations on the + wrapped node. This is used in particular to symbolically record operations + in the symbolic shape workflow. 
+ """ + + def __init__(self, node): + # This field MUST be named node; C++ binding code assumes that this + # class has a field named node that stores SymNode + self.node = node + + def __bool__(self): + return self.node.bool_() + + # Magic methods installed by torch.fx.experimental.sym_node + + def __eq__(self, other: object) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __lt__(self, other) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __gt__(self, other) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __le__(self, other) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __ge__(self, other) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __sym_max__(self, other): + raise AssertionError("type stub not overridden") + + def __sym_min__(self, other): + raise AssertionError("type stub not overridden") + + def __sym_int__(self): + raise AssertionError("type stub not overridden") + + def is_integer(self): + """Return True if the float is an integer.""" + raise AssertionError("type stub not overridden") + + def __repr__(self): + return self.node.str() + +class SymBool: + """ + Like an bool (including magic methods), but redirects all operations on the + wrapped node. This is used in particular to symbolically record operations + in the symbolic shape workflow. + + Unlike regular bools, regular boolean operators will force extra guards instead + of symbolically evaluate. Use the bitwise operators instead to handle this. + """ + + def __init__(self, node): + # This field MUST be named node; C++ binding code assumes that this + # class has a field named node that stores SymNode + self.node = node + + def __bool__(self): + return self.node.bool_() + + def __int__(self): + return builtins.int(self.node.bool_()) + + # Magic methods installed by torch.fx.experimental.sym_node + def __and__(self, other) -> "SymBool": + raise AssertionError("type stub not overridden") + + def __or__(self, other) -> "SymBool": + raise AssertionError("type stub not overridden") + + # We very carefully define __sym_not__, and not a number of other + # plausible alternatives: + # + # - We do not override __not__ because this is not a real magic + # method; you cannot override the meaning of the not builtin in + # Python. We use the name 'sym_not' to clarify that in user code you + # cannot use the builtin not or operator.not_ or operator.__not__ and + # hit this magic method; you must use our custom sym_not operator. + # + # - We do not override the __invert__ method because SymBool is + # meant to be usable in situations where bool is expected. However, + # bitwise negation ~a does the wrong thing with booleans (because + # bool is a subclass of int, so ~1 = -2 which is not falseish.) + # This would be a giant footgun, so we get around it by defining + # our own operator. Note that bitwise and/or do the right thing, + # so we reuse the conventional operators there for readability. 
+ # + def __sym_not__(self) -> "SymBool": + raise AssertionError("type stub not overridden") + + def __sym_ite__(self, then_val, else_val): + raise AssertionError("type stub not overridden") + + def __eq__(self, other) -> builtins.bool: + raise AssertionError("type stub not overridden") + + def __repr__(self): + return str(self.node) + + def __hash__(self): + if self.node.is_constant(): + return hash(self.node.bool_()) + else: + raise TypeError("unhashable type: SymBool") + +def sym_not(a): + r""" SymInt-aware utility for logical negation. + + Args: + a (SymBool or bool): Object to negate + """ + import sympy + from .overrides import has_torch_function_unary, handle_torch_function + + if has_torch_function_unary(a): + return handle_torch_function(sym_not, (a,), a) + if hasattr(a, '__sym_not__'): + return a.__sym_not__() + if isinstance(a, sympy.Basic): + return ~a # type: ignore[operator] + return not a + +def sym_float(a): + r""" SymInt-aware utility for float casting. + + Args: + a (SymInt, SymFloat, or object): Object to cast + """ + from .overrides import has_torch_function_unary, handle_torch_function + + if has_torch_function_unary(a): + return handle_torch_function(sym_float, (a,), a) + if isinstance(a, SymFloat): + return a + elif hasattr(a, '__sym_float__'): + return a.__sym_float__() + return py_float(a) # type: ignore[operator] + + +def sym_int(a): + r""" SymInt-aware utility for int casting. + + Args: + a (SymInt, SymFloat, or object): Object to cast + """ + from .overrides import has_torch_function_unary, handle_torch_function + + if has_torch_function_unary(a): + return handle_torch_function(sym_int, (a,), a) + if isinstance(a, SymInt): + return a + elif isinstance(a, SymFloat): + return math.floor(a) if a >= 0 else math.ceil(a) # type: ignore[arg-type, call-overload] + return py_int(a) # type: ignore[operator] + +def sym_max(a, b): + """ SymInt-aware utility for max().""" + from .overrides import has_torch_function, handle_torch_function + + if has_torch_function((a, b)): + return handle_torch_function(sym_max, (a, b), a, b) + if isinstance(a, (SymInt, SymFloat)): + return a.__sym_max__(b) + elif isinstance(b, (SymInt, SymFloat)): + # NB: If you actually care about preserving output type exactly + # if you do something like max(0, 0.0), it is NOT sound to treat + # min/max as commutative + return b.__sym_max__(a) + return builtins.max(a, b) # type: ignore[operator] + +def sym_min(a, b): + """ SymInt-aware utility for max().""" + from .overrides import has_torch_function, handle_torch_function + + if has_torch_function((a, b)): + return handle_torch_function(sym_min, (a, b), a, b) + if isinstance(a, (SymInt, SymFloat)): + return a.__sym_min__(b) + elif isinstance(b, (SymInt, SymFloat)): + return b.__sym_min__(a) + return builtins.min(a, b) # type: ignore[operator] + +# Drop in replacement for math.sqrt, math.sin, math.cos etc +current_module = sys.modules[__name__] + +def _get_sym_math_fn(name): + def fn(a): + from .overrides import has_torch_function_unary, handle_torch_function + + if has_torch_function_unary(a): + return handle_torch_function(fn, (a,), a) + if hasattr(a, f"__sym_{name}__"): + return getattr(a, f"__sym_{name}__")() + return getattr(math, name)(a) + + return fn + +for name in ("sqrt", "cos", "cosh", "sin", "sinh", "tan", "tanh", "asin", "acos", "atan"): + sym_name = f"_sym_{name}" + fn = _get_sym_math_fn(name) + fn.__qualname__ = fn.__name__ = sym_name + setattr(current_module, sym_name, fn) + +# Adding temporary shortcut +sym_sqrt = current_module._sym_sqrt 
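+# A minimal usage sketch of the sym_* helpers defined above: plain Python
+# numbers fall through to the builtin behaviour, and only SymInt/SymFloat/
+# SymBool inputs are routed to their __sym_*__ methods, e.g.
+#
+#   sym_max(2, 3)    # -> 3      (plain ints: builtins.max)
+#   sym_int(2.7)     # -> 2      (plain float: py_int, truncates toward zero)
+#   sym_not(True)    # -> False  (plain bool: `not`)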
+__all__.append("sym_sqrt") + +del fn, name, sym_name, current_module # type: ignore[possibly-undefined] + + +def sym_ite(b, t, f): + from .overrides import has_torch_function, handle_torch_function + + if has_torch_function((b, t, f)): + return handle_torch_function(sym_ite, (b, t, f), b, t, f) + assert isinstance(b, (SymBool, builtins.bool)) and type(t) == type(f) + if isinstance(b, SymBool): + return b.__sym_ite__(t, f) + return t if b else f + +# Check to see if we can load C extensions, and if not provide some guidance +# on what the problem might be. +try: + # _initExtension is chosen (arbitrarily) as a sentinel. + from torch._C import _initExtension +except ImportError: + import torch._C as _C_for_compiled_check + + # The __file__ check only works for Python 3.7 and above. + if _C_for_compiled_check.__file__ is None: + raise ImportError(textwrap.dedent(''' + Failed to load PyTorch C extensions: + It appears that PyTorch has loaded the `torch/_C` folder + of the PyTorch repository rather than the C extensions which + are expected in the `torch._C` namespace. This can occur when + using the `install` workflow. e.g. + $ python setup.py install && python -c "import torch" + + This error can generally be solved using the `develop` workflow + $ python setup.py develop && python -c "import torch" # This should succeed + or by running Python from a different directory. + ''').strip()) from None + raise # If __file__ is not None the cause is unknown, so just re-raise. + +for name in dir(_C): + if name[0] != '_' and not name.endswith('Base'): + __all__.append(name) + obj = getattr(_C, name) + if (isinstance(obj, Callable) or inspect.isclass(obj)): # type: ignore[arg-type] + if (obj.__module__ != 'torch'): + # TODO: fix their module from C++ side + if name not in ['DisableTorchFunctionSubclass', 'DisableTorchFunction', 'Generator']: + obj.__module__ = 'torch' + elif name == 'TensorBase': + # issue 109438 / pr 109940. Prevent TensorBase from being copied into torch. + delattr(sys.modules[__name__], name) + +if not TYPE_CHECKING: + # issue 38137 and python issue 43367. Submodules of a C extension are + # non-standard, and attributes of those submodules cannot be pickled since + # pickle expect to be able to import them as "from _C.sub import attr" + # which fails with "_C is not a package + for attr in dir(_C): + candidate = getattr(_C, attr) + if type(candidate) is type(_C): + # submodule + if f'torch._C.{attr}' not in sys.modules: + sys.modules[f'torch._C.{attr}'] = candidate + + +################################################################################ +# Define basic utilities +################################################################################ + + +def typename(o): + if isinstance(o, torch.Tensor): + return o.type() + + module = '' + class_name = '' + if hasattr(o, '__module__') and o.__module__ != 'builtins' \ + and o.__module__ != '__builtin__' and o.__module__ is not None: + module = o.__module__ + '.' + + if hasattr(o, '__qualname__'): + class_name = o.__qualname__ + elif hasattr(o, '__name__'): + class_name = o.__name__ + else: + class_name = o.__class__.__name__ + + return module + class_name + + +def is_tensor(obj): + r"""Returns True if `obj` is a PyTorch tensor. + + Note that this function is simply doing ``isinstance(obj, Tensor)``. + Using that ``isinstance`` check is better for typechecking with mypy, + and more explicit - so it's recommended to use that instead of + ``is_tensor``. 
+ + Args: + obj (Object): Object to test + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> torch.is_tensor(x) + True + + """ + return isinstance(obj, torch.Tensor) + + +def is_storage(obj): + r"""Returns True if `obj` is a PyTorch storage object. + + Args: + obj (Object): Object to test + """ + return type(obj) in _storage_classes + + +_GLOBAL_DEVICE_CONTEXT = threading.local() + + +def get_default_device() -> "torch.device": + r"""Gets the default ``torch.Tensor`` to be allocated on ``device``""" + global _GLOBAL_DEVICE_CONTEXT + if hasattr(_GLOBAL_DEVICE_CONTEXT, "device_context"): + device = _GLOBAL_DEVICE_CONTEXT.device_context.device + if device.index is not None: + return device + else: + # TODO: Call like get_device_index() method corresponding to + # each device type + return torch.tensor([]).device + else: + return torch.device("cpu") + + +def set_default_device(device): + """Sets the default ``torch.Tensor`` to be allocated on ``device``. This + does not affect factory function calls which are called with an explicit + ``device`` argument. Factory calls will be performed as if they + were passed ``device`` as an argument. + + To only temporarily change the default device instead of setting it + globally, use ``with torch.device(device):`` instead. + + The default device is initially ``cpu``. If you set the default tensor + device to another device (e.g., ``cuda``) without a device index, tensors + will be allocated on whatever the current device for the device type, + even after :func:`torch.cuda.set_device` is called. + + .. warning:: + + This function imposes a slight performance cost on every Python + call to the torch API (not just factory functions). If this + is causing problems for you, please comment on + https://github.com/pytorch/pytorch/issues/92701 + + .. note:: + + This doesn't affect functions that create tensors that share the same memory as the input, like: + :func:`torch.from_numpy` and :func:`torch.frombuffer` + + Args: + device (device or string): the device to set as default + + Example:: + + >>> # xdoctest: +SKIP("requires cuda, changes global state") + >>> torch.get_default_device() + device(type='cpu') + >>> torch.set_default_device('cuda') # current device is 0 + >>> torch.get_default_device() + device(type='cuda', index=0) + >>> torch.set_default_device('cuda') + >>> torch.cuda.set_device('cuda:1') # current device is 1 + >>> torch.get_default_device() + device(type='cuda', index=1) + >>> torch.set_default_device('cuda:1') + >>> torch.get_default_device() + device(type='cuda', index=1) + + """ + global _GLOBAL_DEVICE_CONTEXT + if hasattr(_GLOBAL_DEVICE_CONTEXT, "device_context"): + device_context = _GLOBAL_DEVICE_CONTEXT.device_context + if device_context is not None: + device_context.__exit__(None, None, None) + + if device is None: + device_context = None + else: + from torch.utils._device import DeviceContext + device_context = DeviceContext(device) + device_context.__enter__() + _GLOBAL_DEVICE_CONTEXT.device_context = device_context + + +def set_default_tensor_type(t): + r""" + .. warning:: + + This function is deprecated as of PyTorch 2.1, please use :func:`torch.set_default_dtype()` and + :func:`torch.set_default_device()` as alternatives. + + Sets the default ``torch.Tensor`` type to floating point tensor type + ``t``. This type will also be used as default floating point type for + type inference in :func:`torch.tensor`. + + The default floating point tensor type is initially ``torch.FloatTensor``. 
+ + Args: + t (type or string): the floating point tensor type or its name + + Example:: + + >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?") + >>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32 + torch.float32 + >>> torch.set_default_tensor_type(torch.DoubleTensor) + >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor + torch.float64 + + """ + if isinstance(t, str): + t = _import_dotted_name(t) + _C._set_default_tensor_type(t) + + +def set_default_dtype(d): + r""" + + Sets the default floating point dtype to :attr:`d`. Supports torch.float32 + and torch.float64 as inputs. Other dtypes may be accepted without complaint + but are not supported and are unlikely to work as expected. + + When PyTorch is initialized its default floating point dtype is torch.float32, + and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like + type inference. The default floating point dtype is used to: + + 1. Implicitly determine the default complex dtype. When the default floating point + type is float32 the default complex dtype is complex64, and when the default + floating point type is float64 the default complex type is complex128. + 2. Infer the dtype for tensors constructed using Python floats or complex Python + numbers. See examples below. + 3. Determine the result of type promotion between bool and integer tensors and + Python floats and complex Python numbers. + + Args: + d (:class:`torch.dtype`): the floating point dtype to make the default. + Either torch.float32 or torch.float64. + + Example: + >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?") + >>> # initial default for floating point is torch.float32 + >>> # Python floats are interpreted as float32 + >>> torch.tensor([1.2, 3]).dtype + torch.float32 + >>> # initial default for floating point is torch.complex64 + >>> # Complex Python numbers are interpreted as complex64 + >>> torch.tensor([1.2, 3j]).dtype + torch.complex64 + + >>> torch.set_default_dtype(torch.float64) + + >>> # Python floats are now interpreted as float64 + >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor + torch.float64 + >>> # Complex Python numbers are now interpreted as complex128 + >>> torch.tensor([1.2, 3j]).dtype # a new complex tensor + torch.complex128 + + """ + _C._set_default_dtype(d) + +def use_deterministic_algorithms(mode: builtins.bool, *, warn_only: builtins.bool = False) -> None: + r""" Sets whether PyTorch operations must use "deterministic" + algorithms. That is, algorithms which, given the same input, and when + run on the same software and hardware, always produce the same output. + When enabled, operations will use deterministic algorithms when available, + and if only nondeterministic algorithms are available they will throw a + :class:`RuntimeError` when called. + + .. note:: This setting alone is not always enough to make an application + reproducible. Refer to :ref:`reproducibility` for more information. + + .. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative + interface for this feature. 
+ + The following normally-nondeterministic operations will act + deterministically when ``mode=True``: + + * :class:`torch.nn.Conv1d` when called on CUDA tensor + * :class:`torch.nn.Conv2d` when called on CUDA tensor + * :class:`torch.nn.Conv3d` when called on CUDA tensor + * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor + * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor + * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor + * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor + * :func:`torch.bmm` when called on sparse-dense CUDA tensors + * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor + and the index is a list of tensors + * :func:`torch.Tensor.index_put` with ``accumulate=False`` + * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU + tensor + * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU + tensor + * :func:`torch.Tensor.scatter_add_` when called on a CUDA tensor + * :func:`torch.gather` when called on a CUDA tensor that requires grad + * :func:`torch.index_add` when called on CUDA tensor + * :func:`torch.index_select` when attempting to differentiate a CUDA tensor + * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor + * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor + * :func:`torch.Tensor.scatter` when `src` type is Tensor and called on CUDA tensor + * :func:`torch.Tensor.scatter_reduce` when ``reduce='sum'`` or ``reduce='mean'`` and called on CUDA tensor + + The following normally-nondeterministic operations will throw a + :class:`RuntimeError` when ``mode=True``: + + * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.MaxUnpool1d` + * :class:`torch.nn.MaxUnpool2d` + * :class:`torch.nn.MaxUnpool3d` + * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor + and one of the following modes is used: + + - ``linear`` + - ``bilinear`` + - ``bicubic`` + - ``trilinear`` + + * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.NLLLoss` when called on a CUDA tensor + * :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor + * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when + ``mode='max'`` + * :func:`torch.Tensor.put_` when ``accumulate=False`` + * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor + * :func:`torch.histc` when called on a CUDA tensor + * :func:`torch.bincount` when called on a CUDA tensor and ``weights`` + tensor is 
given + * :func:`torch.kthvalue` with called on a CUDA tensor + * :func:`torch.median` with indices output when called on a CUDA tensor + * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor + * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex + * :func:`torch.Tensor.scatter_reduce` when ``reduce='prod'`` and called on CUDA tensor + * :func:`torch.Tensor.resize_` when called with a quantized tensor + + In addition, several operations fill uninitialized memory when this setting + is turned on and when + :attr:`torch.utils.deterministic.fill_uninitialized_memory` is turned on. + See the documentation for that attribute for more information. + + A handful of CUDA operations are nondeterministic if the CUDA version is + 10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8`` + or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more + details: ``_ + If one of these environment variable configurations is not set, a :class:`RuntimeError` + will be raised from these operations when called with CUDA tensors: + + * :func:`torch.mm` + * :func:`torch.mv` + * :func:`torch.bmm` + + Note that deterministic operations tend to have worse performance than + nondeterministic operations. + + .. note:: + + This flag does not detect or prevent nondeterministic behavior caused + by calling an inplace operation on a tensor with an internal memory + overlap or by giving such a tensor as the :attr:`out` argument for an + operation. In these cases, multiple writes of different data may target + a single memory location, and the order of writes is not guaranteed. + + Args: + mode (:class:`bool`): If True, makes potentially nondeterministic + operations switch to a deterministic algorithm or throw a runtime + error. If False, allows nondeterministic operations. + + Keyword args: + warn_only (:class:`bool`, optional): If True, operations that do not + have a deterministic implementation will throw a warning instead of + an error. Default: ``False`` + + Example:: + + >>> # xdoctest: +SKIP + >>> torch.use_deterministic_algorithms(True) + + # Forward mode nondeterministic error + >>> torch.randn(10, device='cuda').kthvalue(1) + ... + RuntimeError: kthvalue CUDA does not have a deterministic implementation... + + # Backward mode nondeterministic error + >>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward() + ... + RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation... + """ + _C._set_deterministic_algorithms(mode, warn_only=warn_only) + +def are_deterministic_algorithms_enabled() -> builtins.bool: + r"""Returns True if the global deterministic flag is turned on. Refer to + :func:`torch.use_deterministic_algorithms` documentation for more details. + """ + return _C._get_deterministic_algorithms() + +def is_deterministic_algorithms_warn_only_enabled() -> builtins.bool: + r"""Returns True if the global deterministic flag is set to warn only. + Refer to :func:`torch.use_deterministic_algorithms` documentation for more + details. + """ + return _C._get_deterministic_algorithms_warn_only() + +def set_deterministic_debug_mode(debug_mode: Union[builtins.int, str]) -> None: + r"""Sets the debug mode for deterministic operations. + + .. note:: This is an alternative interface for + :func:`torch.use_deterministic_algorithms`. Refer to that function's + documentation for details about affected operations. 
+ + Args: + debug_mode(str or int): If "default" or 0, don't error or warn on + nondeterministic operations. If "warn" or 1, warn on + nondeterministic operations. If "error" or 2, error on + nondeterministic operations. + """ + + # NOTE: builtins.int is used here because int in this scope resolves + # to torch.int + if not isinstance(debug_mode, (builtins.int, str)): + raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}') + + if isinstance(debug_mode, str): + if debug_mode == 'default': + debug_mode = 0 + elif debug_mode == 'warn': + debug_mode = 1 + elif debug_mode == 'error': + debug_mode = 2 + else: + raise RuntimeError( + 'invalid value of debug_mode, expected one of `default`, ' + f'`warn`, `error`, but got {debug_mode}') + + if debug_mode == 0: + _C._set_deterministic_algorithms(False) + elif debug_mode == 1: + _C._set_deterministic_algorithms(True, warn_only=True) + elif debug_mode == 2: + _C._set_deterministic_algorithms(True) + else: + raise RuntimeError( + 'invalid value of debug_mode, expected 0, 1, or 2, ' + f'but got {debug_mode}') + +def get_deterministic_debug_mode() -> builtins.int: + r"""Returns the current value of the debug mode for deterministic + operations. Refer to :func:`torch.set_deterministic_debug_mode` + documentation for more details. + """ + + if _C._get_deterministic_algorithms(): + if _C._get_deterministic_algorithms_warn_only(): + return 1 + else: + return 2 + else: + return 0 + +def get_float32_matmul_precision() -> builtins.str: + r"""Returns the current value of float32 matrix multiplication precision. Refer to + :func:`torch.set_float32_matmul_precision` documentation for more details. + """ + return _C._get_float32_matmul_precision() + +def set_float32_matmul_precision(precision: str) -> None: + r"""Sets the internal precision of float32 matrix multiplications. + + Running float32 matrix multiplications in lower precision may significantly increase + performance, and in some programs the loss of precision has a negligible impact. + + Supports three settings: + + * "highest", float32 matrix multiplications use the float32 datatype (24 mantissa + bits with 23 bits explicitly stored) for internal computations. + * "high", float32 matrix multiplications either use the TensorFloat32 datatype (10 + mantissa bits explicitly stored) or treat each float32 number as the sum of two bfloat16 numbers + (approximately 16 mantissa bits with 14 bits explicitly stored), if the appropriate fast matrix multiplication + algorithms are available. Otherwise float32 matrix multiplications are computed + as if the precision is "highest". See below for more information on the bfloat16 + approach. + * "medium", float32 matrix multiplications use the bfloat16 datatype (8 mantissa + bits with 7 bits explicitly stored) for internal computations, if a fast matrix multiplication algorithm + using that datatype internally is available. Otherwise float32 + matrix multiplications are computed as if the precision is "high". + + When using "high" precision, float32 multiplications may use a bfloat16-based algorithm + that is more complicated than simply truncating to some smaller number mantissa bits + (e.g. 10 for TensorFloat32, 7 for bfloat16 explicitly stored). Refer to [Henry2019]_ for a complete + description of this algorithm. 
To briefly explain here, the first step is to realize + that we can perfectly encode a single float32 number as the sum of three bfloat16 + numbers (because float32 has 23 mantissa bits while bfloat16 has 7 explicitly stored, and both have the + same number of exponent bits). This means that the product of two float32 numbers can + be exactly given by the sum of nine products of bfloat16 numbers. We can then trade + accuracy for speed by dropping some of these products. The "high" precision algorithm + specifically keeps only the three most significant products, which conveniently excludes + all of the products involving the last 8 mantissa bits of either input. This means that + we can represent our inputs as the sum of two bfloat16 numbers rather than three. + Because bfloat16 fused-multiply-add (FMA) instructions are typically >10x faster than + float32 ones, it's faster to do three multiplications and 2 additions with bfloat16 + precision than it is to do a single multiplication with float32 precision. + + .. [Henry2019] http://arxiv.org/abs/1904.06376 + + .. note:: + + This does not change the output dtype of float32 matrix multiplications, + it controls how the internal computation of the matrix multiplication is performed. + + .. note:: + + This does not change the precision of convolution operations. Other flags, + like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution + operations. + + .. note:: + + This flag currently only affects one native device type: CUDA. + If "high" or "medium" are set then the TensorFloat32 datatype will be used + when computing float32 matrix multiplications, equivalent to setting + `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default) + is set then the float32 datatype is used for internal computations, equivalent + to setting `torch.backends.cuda.matmul.allow_tf32 = False`. + + Args: + precision(str): can be set to "highest" (default), "high", or "medium" (see above). + + """ + _C._set_float32_matmul_precision(precision) + +def set_warn_always(b: builtins.bool) -> None: + r"""When this flag is False (default) then some PyTorch warnings may only + appear once per process. This helps avoid excessive warning information. + Setting it to True causes these warnings to always appear, which may be + helpful when debugging. + + Args: + b (:class:`bool`): If True, force warnings to always be emitted + If False, set to the default behaviour + """ + _C._set_warnAlways(b) + +def is_warn_always_enabled() -> builtins.bool: + r"""Returns True if the global warn_always flag is turned on. Refer to + :func:`torch.set_warn_always` documentation for more details. + """ + return _C._get_warnAlways() + +################################################################################ +# Define error checking functions +################################################################################ + +# These error checking functions must be kept consistent with their C++ +# equivalents. Their C++ equivalents are mentioned where applicable. 
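+# A minimal sketch of how these checks are typically invoked, assuming `x` is
+# some tensor and `n` a size-like integer; `message` is a callable so the
+# error string is only built when the condition is False:
+#
+#   _check(x.dim() == 2, lambda: f"expected a 2D tensor, got {x.dim()}D")
+#   _check_is_size(n)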
+ +def _check_with(error_type, cond: Union[builtins.bool, SymBool], message: Callable[[], str]): # noqa: F811 + if not isinstance(cond, (builtins.bool, torch.SymBool)): + raise TypeError(f'cond must be a bool, but got {type(cond)}') + + from torch.fx.experimental.symbolic_shapes import expect_true + if expect_true(cond): + return + + # error_type must be a subclass of Exception and not subclass of Warning + assert issubclass(error_type, Exception) and not issubclass(error_type, Warning) + + if message is None: + message_evaluated = ( + 'Expected cond to be True, but got False. (Could this error ' + 'message be improved? If so, please report an enhancement request ' + 'to PyTorch.)') + + else: + if not callable(message): + raise TypeError('message must be a callable') + + message_evaluated = str(message()) + + raise error_type(message_evaluated) + +def _check(cond, message=None): # noqa: F811 + r"""Throws error containing an optional message if the specified condition + is False. + + Error type: ``RuntimeError`` + + C++ equivalent: ``TORCH_CHECK`` + + Args: + cond (:class:`bool`): If False, throw error + + message (Callable, optional): Callable that returns either a string or + an object that has a ``__str__()`` method to be used as the error + message. Default: ``None`` + """ + _check_with(RuntimeError, cond, message) + +def _check_is_size(i, message=None): + """Checks that a given integer is a valid size (i.e., is non-negative). + You should use this over _check(i >= 0) because we can use the semantic + information (that i is a size) to make some further inferences in case + i is an unbacked SymInt. + + NB: Do NOT use this in contexts where a -1 size would be valid (indicating + to infer the size from context, or if you should wrap-around or truncate). + Only use this if the only valid value is an honest to goodness size. + """ + # This is responsible for the expect_true + _check(i >= 0, message) + from torch.fx.experimental.symbolic_shapes import _advise_is_size + _advise_is_size(i) + +def _check_index(cond, message=None): # noqa: F811 + r"""Throws error containing an optional message if the specified condition + is False. + + Error type: ``IndexError`` + + C++ equivalent: ``TORCH_CHECK_INDEX`` + + Args: + cond (:class:`bool`): If False, throw error + + message (Callable, optional): Callable that returns either a string or + an object that has a ``__str__()`` method to be used as the error + message. Default: ``None`` + """ + _check_with(IndexError, cond, message) + +def _check_value(cond, message=None): # noqa: F811 + r"""Throws error containing an optional message if the specified condition + is False. + + Error type: ``ValueError`` + + C++ equivalent: ``TORCH_CHECK_VALUE`` + + Args: + cond (:class:`bool`): If False, throw error + + message (Callable, optional): Callable that returns either a string or + an object that has a ``__str__()`` method to be used as the error + message. Default: ``None`` + """ + _check_with(ValueError, cond, message) + +def _check_type(cond, message=None): # noqa: F811 + r"""Throws error containing an optional message if the specified condition + is False. + + Error type: ``TypeError`` + + C++ equivalent: ``TORCH_CHECK_TYPE`` + + Args: + cond (:class:`bool`): If False, throw error + + message (Callable, optional): Callable that returns either a string or + an object that has a ``__str__()`` method to be used as the error + message. 
Default: ``None`` + """ + _check_with(TypeError, cond, message) + +def _check_not_implemented(cond, message=None): # noqa: F811 + r"""Throws error containing an optional message if the specified condition + is False. + + Error type: ``NotImplementedError`` + + C++ equivalent: ``TORCH_CHECK_NOT_IMPLEMENTED`` + + Args: + cond (:class:`bool`): If False, throw error + + message (Callable, optional): Callable that returns either a string or + an object that has a ``__str__()`` method to be used as the error + message. Default: ``None`` + """ + _check_with(NotImplementedError, cond, message) + +def _check_tensor_all_with(error_type, cond, message=None): # noqa: F811 + if not torch.is_tensor(cond): + raise TypeError(f'cond must be a tensor, but got {type(cond)}') + + if not cond.dtype == torch.bool: + raise TypeError( + f'cond tensor must have dtype torch.bool, but got {cond.dtype}') + + _check_with(error_type, cond._is_all_true().item(), message) + +# C++ equivalent: `TORCH_CHECK_TENSOR_ALL` +def _check_tensor_all(cond, message=None): # noqa: F811 + r"""Throws error containing an optional message if the specified condition + is False. + + Error type: ``RuntimeError`` + + C++ equivalent: ``TORCH_CHECK_TENSOR_ALL`` + + Args: + cond (:class:`torch.Tensor`): Tensor of dtype ``torch.bool``. If any + element is ``False``, throw error + + message (Callable, optional): Callable that returns either a string or + an object that has a ``__str__()`` method to be used as the error + message. Default: ``None`` + """ + _check_tensor_all_with(RuntimeError, cond, message) + +################################################################################ +# Define numeric constants +################################################################################ + +# For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and +# NumPy consistency (https://numpy.org/devdocs/reference/constants.html) +from math import e , nan , inf , pi +__all__.extend(['e', 'pi', 'nan', 'inf']) + +################################################################################ +# Define Storage and Tensor classes +################################################################################ + +from ._tensor import Tensor +from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage, _warn_typed_storage_removal + +# NOTE: New Storage classes should never be added. When adding a new +# dtype, use torch.storage.TypedStorage directly. 
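+# A minimal sketch, assuming TypedStorage accepts an element count plus a
+# dtype keyword: a storage for a new dtype is obtained directly from
+# torch.storage.TypedStorage rather than by adding another legacy subclass,
+# e.g. TypedStorage(4, dtype=torch.float32).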
+ +class ByteStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.uint8 + +class DoubleStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.double + +class FloatStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.float + +class HalfStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.half + +class LongStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.long + +class IntStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.int + +class ShortStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.short + +class CharStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.int8 + +class BoolStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.bool + +class BFloat16Storage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.bfloat16 + +class ComplexDoubleStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.cdouble + +class ComplexFloatStorage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.cfloat + +class QUInt8Storage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.quint8 + +class QInt8Storage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.qint8 + +class QInt32Storage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.qint32 + +class QUInt4x2Storage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.quint4x2 + +class QUInt2x4Storage(_LegacyStorage): + @classproperty + def dtype(self): + _warn_typed_storage_removal(stacklevel=3) + return self._dtype + + @classproperty + def _dtype(self): + return torch.quint2x4 + +_storage_classes = { + UntypedStorage, DoubleStorage, FloatStorage, LongStorage, IntStorage, + ShortStorage, CharStorage, 
ByteStorage, HalfStorage, BoolStorage, + QUInt8Storage, QInt8Storage, QInt32Storage, BFloat16Storage, + ComplexFloatStorage, ComplexDoubleStorage, QUInt4x2Storage, QUInt2x4Storage, + TypedStorage +} + +# The _tensor_classes set is initialized by the call to initialize_python_bindings. +_tensor_classes: Set[Type] = set() + +# If you edit these imports, please update torch/__init__.py.in as well +from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed +from .serialization import save, load +from ._tensor_str import set_printoptions + +################################################################################ +# Initialize extension +################################################################################ + +def manager_path(): + if _running_with_deploy() or platform.system() == 'Windows': + return b"" + path = get_file_path('torch', 'bin', 'torch_shm_manager') + prepare_multiprocessing_environment(get_file_path('torch')) + if not os.path.exists(path): + raise RuntimeError("Unable to find torch_shm_manager at " + path) + return path.encode('utf-8') + +from torch.amp import autocast, GradScaler + +# Initializing the extension shadows the built-in python float / int classes; +# store them for later use by SymInt / SymFloat. +py_float = float +py_int = int + +# Shared memory manager needs to know the exact location of manager executable +_C._initExtension(manager_path()) +del manager_path + +# Appease the type checker: it can't deal with direct setting of globals(). +# Note that we will see "too many" functions when reexporting this way; there +# is not a good way to fix this problem. Perhaps, try to redesign VariableFunctions +# so that this import is good enough +if TYPE_CHECKING: + # Some type signatures pulled in from _VariableFunctions here clash with + # signatures already imported. For now these clashes are ignored; see + # PR #43339 for details. + from torch._C._VariableFunctions import * # type: ignore[assignment, misc] # noqa: F403 + # Fixup segment_reduce visibility + _segment_reduce = segment_reduce + del segment_reduce # noqa: F821 + +# Ops not to be exposed in `torch` namespace, +# mostly helper ops. 
+PRIVATE_OPS = ( + 'unique_dim', +) + +for name in dir(_C._VariableFunctions): + if name.startswith('__') or name in PRIVATE_OPS: + continue + obj = getattr(_C._VariableFunctions, name) + obj.__module__ = 'torch' + # Hide some APIs that should not be public + if name == "segment_reduce": + # TODO: Once the undocumented FC window is passed, remove the line bellow + globals()[name] = obj + name = "_" + name + globals()[name] = obj + if not name.startswith("_"): + __all__.append(name) + + +################################################################################ +# Add torch.dtype instances to the public API +################################################################################ + +import torch + +for attribute in dir(torch): + if isinstance(getattr(torch, attribute), torch.dtype): + __all__.append(attribute) + +################################################################################ +# Import TorchDynamo's lazy APIs to avoid circular dependenices +################################################################################ + +# needs to be before from .functional import * to avoid circular dependencies +from ._compile import _disable_dynamo + +################################################################################ +# Import interface functions defined in Python +################################################################################ + +# needs to be after the above ATen bindings so we can overwrite from Python side +from .functional import * # noqa: F403 + + +################################################################################ +# Remove unnecessary members +################################################################################ + +del _StorageBase +del _LegacyStorage + +################################################################################ +# Define _assert +################################################################################ + +# needs to be before the submodule imports to avoid circular dependencies +def _assert(condition, message): + r"""A wrapper around Python's assert which is symbolically traceable. + """ + from .overrides import has_torch_function, handle_torch_function + + if type(condition) is not torch.Tensor and has_torch_function((condition,)): + return handle_torch_function(_assert, (condition,), condition, message) + assert condition, message + +################################################################################ +# Import most common subpackages +################################################################################ + +# Use the redundant form so that type checkers know that these are a part of +# the public API. The "regular" import lines are there solely for the runtime +# side effect of adding to the imported module's members for other users. 
+from torch import cuda as cuda +from torch import cpu as cpu +from torch import mps as mps +from torch import xpu as xpu +from torch import autograd as autograd +from torch.autograd import ( + no_grad as no_grad, + enable_grad as enable_grad, + set_grad_enabled as set_grad_enabled, + inference_mode as inference_mode, +) +from torch import fft as fft +from torch import futures as futures +from torch import _awaits as _awaits +from torch import nested as nested +from torch import nn as nn +from torch.signal import windows as windows +from torch import optim as optim +import torch.optim._multi_tensor +from torch import multiprocessing as multiprocessing +from torch import sparse as sparse +from torch import special as special +import torch.utils.backcompat +from torch import jit as jit +from torch import linalg as linalg +from torch import hub as hub +from torch import random as random +from torch import distributions as distributions +from torch import testing as testing +from torch import backends as backends +import torch.utils.data +from torch import __config__ as __config__ +from torch import __future__ as __future__ +from torch import profiler as profiler + +# Quantized, sparse, AO, etc. should be last to get imported, as nothing +# is expected to depend on them. +from torch import ao as ao +# nn.quant* depends on ao -- so should be after those. +import torch.nn.quantizable +import torch.nn.quantized +import torch.nn.qat +import torch.nn.intrinsic + +_C._init_names(list(torch._storage_classes)) + +# attach docstrings to torch and tensor functions +from . import _torch_docs, _tensor_docs, _storage_docs +del _torch_docs, _tensor_docs, _storage_docs + + +def compiled_with_cxx11_abi() -> builtins.bool: + r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1""" + return _C._GLIBCXX_USE_CXX11_ABI + + +# Import the ops "namespace" +from torch._ops import ops +from torch._classes import classes +import torch._library + +# quantization depends on torch.fx +# Import quantization +from torch import quantization as quantization + +# Import the quasi random sampler +from torch import quasirandom as quasirandom + +# If you are seeing this, it means that this call site was not checked if +# the memory format could be preserved, and it was switched to old default +# behaviour of contiguous +legacy_contiguous_format = contiguous_format + +# Register fork handler to initialize OpenMP in child processes (see gh-28389) +from torch.multiprocessing._atfork import register_after_fork +register_after_fork(torch.get_num_threads) +del register_after_fork + +# Import tools that require fully imported torch (for applying +# torch.jit.script as a decorator, for instance): +from ._lobpcg import lobpcg as lobpcg + +# These were previously defined in native_functions.yaml and appeared on the +# `torch` namespace, but we moved them to c10 dispatch to facilitate custom +# class usage. We add these lines here to preserve backward compatibility. +quantized_lstm = torch.ops.aten.quantized_lstm +quantized_gru = torch.ops.aten.quantized_gru + +from torch.utils.dlpack import from_dlpack, to_dlpack + +# Import experimental masked operations support. See +# [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more +# information. +from . 
import masked + +# Import removed ops with error message about removal +from ._linalg_utils import ( # type: ignore[misc] + matrix_rank, + eig, + solve, + lstsq, +) +from ._linalg_utils import _symeig as symeig # type: ignore[misc] + +class _TorchCompileInductorWrapper: + compiler_name = "inductor" + + def __init__(self, mode, options, dynamic): + self.config: Dict[str, Any] = dict() + self.dynamic = dynamic + self.apply_mode(mode) + self.apply_options(options) + + if self.config.get("triton.cudagraphs", False): + os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1" + # FIXME: CUDA Graph does not work well with CUPTI teardown. + # 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11) + # 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12) + # Workaround: turn off CUPTI teardown when using CUDA Graphs. + os.environ["TEARDOWN_CUPTI"] = "0" + + def __eq__(self, other): + return (isinstance(other, _TorchCompileInductorWrapper) and + self.config == other.config and + self.dynamic == other.dynamic) + + def apply_mode(self, mode: Optional[str]): + if mode is None or mode == "default": + pass + elif mode in ("reduce-overhead", "max-autotune", "max-autotune-no-cudagraphs"): + from torch._inductor import list_mode_options + self.apply_options(list_mode_options(mode, self.dynamic)) + else: + raise RuntimeError( + f"Unrecognized mode={mode}, should be one of: default, reduce-overhead, max-autotune, max-autotune-no-cudagraphs" + ) + + def apply_options(self, options: Optional[Dict[str, Any]]): + if not options: + return + + from torch._inductor import config + current_config: Dict[str, Any] = config.shallow_copy_dict() + + for key, val in options.items(): + attr_name = key.replace("-", "_") + if attr_name not in current_config: + raise RuntimeError( + f"Unexpected optimization option {key}, known options are {list(current_config.keys())}" + ) + if type(val) is not type(current_config[attr_name]): + val_type_str = type(val).__name__ + expected_type_str = type(current_config[attr_name]).__name__ + raise RuntimeError( + f"Unexpected type of attr {key}, got {val_type_str} should be {expected_type_str}" + ) + self.config[attr_name] = val + + def __call__(self, model_, inputs_): + from torch._inductor.compile_fx import compile_fx + + return compile_fx(model_, inputs_, config_patches=self.config) + + def get_compiler_config(self): + from torch._inductor.compile_fx import get_patched_config_dict + return get_patched_config_dict(config_patches=self.config) + + def reset(self): + from torch._inductor import config + if "triton.cudagraphs" in self.config or config.triton.cudagraphs: + if self.config.get("triton.cudagraphs", True): + from torch._inductor.cudagraph_trees import reset_cudagraph_trees + reset_cudagraph_trees() + +class _TorchCompileWrapper: + def __init__(self, backend, mode, options, dynamic): + from torch._dynamo.backends.registry import lookup_backend + + if isinstance(backend, str): + self.compiler_name = backend + elif hasattr(backend, "__name__"): + self.compiler_name = backend.__name__ + else: + self.compiler_name = str(backend) + self.dynamic = dynamic + self.compiler_fn = lookup_backend(backend) + self.kwargs = {} + # only pass the args if they non-empty + if mode and mode != "default": + self.kwargs["mode"] = mode + if options: + self.kwargs["options"] = options + + def __eq__(self, other): + return (isinstance(other, _TorchCompileWrapper) and + self.compiler_fn == other.compiler_fn and + self.kwargs == other.kwargs and + self.dynamic == other.dynamic) + + def __call__(self, 
model_, inputs_): + return self.compiler_fn(model_, inputs_, **self.kwargs) + + def reset(self): + if hasattr(self.compiler_fn, "reset"): + self.compiler_fn.reset() + + +def compile(model: Optional[Callable] = None, *, + fullgraph: builtins.bool = False, + dynamic: Optional[builtins.bool] = None, + backend: Union[str, Callable] = "inductor", + mode: Union[str, None] = None, + options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None, + disable: builtins.bool = False) -> Callable: + """ + Optimizes given model/function using TorchDynamo and specified backend. + + Concretely, for every frame executed within the compiled region, we will attempt + to compile it and cache the compiled result on the code object for future + use. A single frame may be compiled multiple times if previous compiled + results are not applicable for subsequent calls (this is called a "guard + failure), you can use TORCH_LOGS=guards to debug these situations. + Multiple compiled results can be associated with a frame up to + ``torch._dynamo.config.cache_size_limit``, which defaults to 64; at which + point we will fall back to eager. Note that compile caches are per + *code object*, not frame; if you dynamically create multiple copies of a + function, they will all share the same code cache. + + Args: + model (Callable): Module/function to optimize + fullgraph (bool): If False (default), torch.compile attempts to discover compileable regions + in the function that it will optimize. If True, then we require that the entire function be + capturable into a single graph. If this is not possible (that is, if there are graph breaks), + then this will raise an error. + dynamic (bool or None): Use dynamic shape tracing. When this is True, we will up-front attempt + to generate a kernel that is as dynamic as possible to avoid recompilations when + sizes change. This may not always work as some operations/optimizations will + force specialization; use TORCH_LOGS=dynamic to debug overspecialization. + When this is False, we will NEVER generate dynamic kernels, we will always specialize. + By default (None), we automatically detect if dynamism has occurred and compile a more + dynamic kernel upon recompile. + backend (str or Callable): backend to be used + + - "inductor" is the default backend, which is a good balance between performance and overhead + + - Non experimental in-tree backends can be seen with `torch._dynamo.list_backends()` + + - Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)` + + - To register an out-of-tree custom backend: https://pytorch.org/docs/main/compile/custom-backends.html + mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs" + + - "default" is the default mode, which is a good balance between performance and overhead + + - "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs, + useful for small batches. Reduction of overhead can come at the cost of more memory + usage, as we will cache the workspace memory required for the invocation so that we + do not have to reallocate it on subsequent runs. Reduction of overhead is not guaranteed + to work; today, we only reduce overhead for CUDA only graphs which do not mutate inputs. + There are other circumstances where CUDA graphs are not applicable; use TORCH_LOG=perf_hints + to debug. + + - "max-autotune" is a mode that leverages Triton based matrix multiplications and convolutions + It enables CUDA graphs by default. 
+ + - "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs + + - To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()` + + options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are + + - `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set + + - `max_autotune` which will profile to pick the best matmul configuration + + - `fallback_random` which is useful when debugging accuracy issues + + - `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores + + - `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs + + - `trace.enabled` which is the most useful debugging flag to turn on + + - `trace.graph_diagram` which will show you a picture of your graph after fusion + + - For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()` + disable (bool): Turn torch.compile() into a no-op for testing + + Example:: + + @torch.compile(options={"triton.cudagraphs": True}, fullgraph=True) + def foo(x): + return torch.sin(x) + torch.cos(x) + + """ + _C._log_api_usage_once("torch.compile") + # Temporary until we get proper support for python 3.12 + if sys.version_info >= (3, 12): + raise RuntimeError("Dynamo is not supported on Python 3.12+") + + # Decorator mode + if model is None: + def fn(model: Callable): + if model is None: + raise RuntimeError("Model can't be None") + return compile(model, + fullgraph=fullgraph, + dynamic=dynamic, + backend=backend, + mode=mode, + options=options, + disable=disable) + return fn + + if mode is not None and options is not None: + raise RuntimeError("Either mode or options can be specified, but both can't be specified at the same time.") + if mode is None and options is None: + mode = "default" + if backend == "inductor": + backend = _TorchCompileInductorWrapper(mode, options, dynamic) + else: + backend = _TorchCompileWrapper(backend, mode, options, dynamic) + + return torch._dynamo.optimize(backend=backend, nopython=fullgraph, dynamic=dynamic, disable=disable)(model) + + +from torch import export as export + +from torch._higher_order_ops import cond + +def _register_device_module(device_type, module): + r"""Register an external runtime module of the specific :attr:`device_type` + supported by torch. + + After the :attr:`module` is registered correctly, the user can refer + the external runtime module as part of torch with attribute torch.xxx. + """ + # Make sure the device_type represent a supported device type for torch. + device_type = torch.device(device_type).type + m = sys.modules[__name__] + if hasattr(m, device_type): + raise RuntimeError(f"The runtime module of '{device_type}' has already " + f"been registered with '{getattr(m, device_type)}'") + setattr(m, device_type, module) + torch_module_name = '.'.join([__name__, device_type]) + sys.modules[torch_module_name] = module + +# expose return_types +from . import return_types +from . import library +if not TYPE_CHECKING: + from . import _meta_registrations + +# Enable CUDA Sanitizer +if 'TORCH_CUDA_SANITIZER' in os.environ: + import torch.cuda._sanitizer as csan + + csan.enable_cuda_sanitizer() + +# Populate magic methods on SymInt and SymFloat +import torch.fx.experimental.sym_node + +from torch import func as func +from torch.func import vmap + + +# The function _sparse_coo_tensor_unsafe is removed from PyTorch +# Python API (v. 
1.13), here we temporarily provide its replacement +# with a deprecation warning. +# TODO: remove the function for PyTorch v 1.15. +def _sparse_coo_tensor_unsafe(*args, **kwargs): + import warnings + warnings.warn('torch._sparse_coo_tensor_unsafe is deprecated, ' + 'use torch.sparse_coo_tensor(..., check_invariants=False) instead.') + kwargs['check_invariants'] = False + return torch.sparse_coo_tensor(*args, **kwargs) + +# Register MPS specific decomps +torch.backends.mps._init() + +if not _running_with_deploy(): + from torch import compiler as compiler + + class _TritonLibrary: + lib = torch.library.Library("triton", "DEF") + ops_table: Dict[Tuple[str, str], Callable] = {} + + @classmethod + def registerOp(cls, op_key, full_schema, op_impl, dispatch_key): + if (op_key, dispatch_key) not in cls.ops_table: + cls.lib.define(full_schema) + cls.lib.impl("triton::" + op_key, op_impl, dispatch_key) + cls.ops_table[(op_key, dispatch_key)] = op_impl + + return cls.ops_table[(op_key, dispatch_key)] + + +# Deprecated attributes +_deprecated_attrs = { + "has_mps": torch.backends.mps.is_built, + "has_cuda": torch.backends.cuda.is_built, + "has_cudnn": torch.backends.cudnn.is_available, + "has_mkldnn": torch.backends.mkldnn.is_available, +} + +if TYPE_CHECKING: + # Import the following modules during type checking to enable code intelligence features, + # such as auto-completion in tools like pylance, even when these modules are not explicitly + # imported in user code. + from torch import _dynamo as _dynamo + from torch import _inductor as _inductor + from torch import onnx as onnx + +else: + _lazy_modules = { + "_dynamo", + "_inductor", + "_export", + # ONNX must be imported after _dynamo, _ops, _subclasses, fx, func and jit + "onnx", + } + + def __getattr__(name): + # Deprecated attrs + replacement = _deprecated_attrs.get(name) + if replacement is not None: + import warnings + warnings.warn(f"'{name}' is deprecated, please use '{replacement.__module__}.{replacement.__name__}()'", stacklevel=2) + return replacement() + + # Lazy modules + if name in _lazy_modules: + import importlib + return importlib.import_module(f".{name}", __name__) + + raise AttributeError(f"module '{__name__}' has no attribute '{name}'") + + +def _constrain_as_value(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None): + """ + Add min/max constraint on the intermediate symbol at tracing time. If called in eager mode, + it will still check if the input value is within the specified range. + """ + torch.sym_constrain_range(symbol, min=min, max=max) + + +def _constrain_as_size(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None): + """ + This indicates that a given int is size-like, and can be used in any context where a size is expected. + You will typically use this when reading out integers from Tensors, e.g., max.item() or lengths.tolist() + which then need to be used as tensor constructors. Providing these assertions to PyTorch can help resolve + GuardOnDataDependentSymNode errors upon export, since we cannot guard on unbacked SymInts. + + This function has unusual semantics which distinguish it from + constrain_as_value. Specifically, in some circumstances in framework + code, we will treat this int as >= 2 (when we do a size-oblivious guard). 
+ This makes it easier to This makes it easier to use the unbacked int in + size contexts, as we will often attempt to guard on a size being zero/one + (e.g., when computing the contiguity of a tensor, or testing if + broadcasting can occur), which will not work on unbacked SymInts. + However, if we conservatively assume that the size is not zero/one, we will + end up with a graph that will still work even if the size is zero/one. + + For more details, see https://docs.google.com/document/d/1HSuTTVvYH1pTew89Rtpeu84Ht3nQEFTYhAX3Ypa_xJs/edit + ``` + """ + torch.sym_constrain_range_for_size(symbol, min=min, max=max) + + +from . import _logging +_logging._init_logs() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_compile.py b/llmeval-env/lib/python3.10/site-packages/torch/_compile.py new file mode 100644 index 0000000000000000000000000000000000000000..354d64e9ff9fddc9a1dc321241ce8bea7955b58a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_compile.py @@ -0,0 +1,30 @@ +""" +APIs related to torch.compile which lazily import torch._dynamo to avoid +circular dependencies. +""" +import functools + + +def _disable_dynamo(fn=None, recursive=True): + """ + This API should be only used inside torch, external users should still use + torch._dynamo.disable. The main goal of this API is to avoid circular + imports issues that is common while using _dynamo.disable inside torch + itself. + + This API avoids it by lazily importing torch._dynamo from the import time to + the invocation of the decorated function. + """ + if fn is not None: + + @functools.wraps(fn) + def inner(*args, **kwargs): + import torch._dynamo + + return torch._dynamo.disable(fn, recursive)(*args, **kwargs) + + return inner + else: + # decorator usage like @_disable_dynamo(recursive=False). The resulting + # object expects the original decorated function as the arg. + return functools.partial(_disable_dynamo, recursive=recursive) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_jit_internal.py b/llmeval-env/lib/python3.10/site-packages/torch/_jit_internal.py new file mode 100644 index 0000000000000000000000000000000000000000..64509816e09cf029bce0663287d94661c6c4585c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_jit_internal.py @@ -0,0 +1,1510 @@ +""" +The weak_script annotation needs to be here instead of inside torch/jit/ so it +can be used in other places in torch/ (namely torch.nn) without running into +circular dependency problems +""" + +import ast +import builtins +import collections +import contextlib +import enum +import inspect +import io +import pickle +import sys +import threading +import types +import typing +import warnings +import weakref +from textwrap import dedent +from typing import ( # noqa: F401 + Any, + Callable, + Dict, + Final, + ForwardRef, + Generic, + get_args, # new in 3.8 + get_origin, # new in 3.8 + List, + Optional, + Tuple, + Type, + TypeVar, + Union, +) + +import torch + +# This is needed. `torch._jit_internal` is imported before `torch.distributed.__init__`. +# Explicitly ask to import `torch.distributed.__init__` first. +# Otherwise, "AttributeError: module 'torch' has no attribute 'distributed'" is raised. 
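# A minimal usage sketch for `_disable_dynamo` from torch/_compile.py above,
# showing both decorator forms it supports; the helper names `my_helper` and
# `my_other_helper` are hypothetical and not part of the original source:
#
#     from torch._compile import _disable_dynamo
#
#     @_disable_dynamo                      # bare decorator form
#     def my_helper(x):
#         return x + 1
#
#     @_disable_dynamo(recursive=False)     # parameterized form
#     def my_other_helper(x):
#         return x * 2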
+import torch.distributed.rpc +import torch.package._mangling as package_mangling +from torch._awaits import _Await +from torch._C import _Await as CAwait, Future as CFuture +from torch._sources import fake_range, get_source_lines_and_file, parse_def +from torch.futures import Future + +IS_PY39_PLUS: Final[bool] = sys.version_info >= (3, 9) +IS_PY310_PLUS: Final[bool] = sys.version_info >= (3, 10) + +BuiltinUnionType: Union[Type, Tuple[Type, ...]] +if sys.version_info >= (3, 10): + # NOTE: IS_PY310_PLUS doesn't work with mypy. + # cf. https://mypy.readthedocs.io/en/stable/common_issues.html#python-version-and-system-platform-checks + BuiltinUnionType = types.UnionType +else: + BuiltinUnionType = () # trick: this makes isinstance short circuit. + +LockType: Type +try: + import _thread + + LockType = _thread.LockType +except ImportError: + import _dummy_thread # type: ignore[import-not-found] + + LockType = _dummy_thread.LockType + +# Wrapper functions that can call either of 2 functions depending on a boolean +# argument +boolean_dispatched: "weakref.WeakKeyDictionary[Callable, Dict[str, Callable]]" = ( + weakref.WeakKeyDictionary() +) # noqa: T484 + + +FAKE_FILENAME_PREFIX = "__torch_jit_dataclass" + + +class SourceLoader: + def __init__(self): + self.content = {} + + def cache(self, fn, source): + self.content[fn] = source + + def get_source(self, fn): + return self.content.get(fn) + + +loader = SourceLoader() + + +def createResolutionCallbackFromEnv(lookup_base): + """ + Creates a resolution callback that will look up qualified names in an + environment, starting with `lookup_base` for the base of any qualified + names, then proceeding down the lookup chain with the resolved object. + + You should not use this directly, it should only be used from the other + createResolutionCallbackFrom* functions. + """ + + def lookupInModule(qualified_name, module): + if "." in qualified_name: + base, remaining_pieces = qualified_name.split(".", maxsplit=1) + module_value = getattr(module, base) + return lookupInModule(remaining_pieces, module_value) + else: + return getattr(module, qualified_name) + + def parseNestedExpr(expr, module) -> Tuple[Any, int]: + i = 0 + while i < len(expr) and expr[i] not in (",", "[", "]"): + i += 1 + + # Special case logic for the empty Tuple as a subscript (used + # in the type annotation `Tuple[()]`) + if expr[:i] == "()": + return (), i + + base = lookupInModule(expr[:i].strip(), module) + assert base is not None, f"Unresolvable type {expr[:i]}" + if i == len(expr) or expr[i] != "[": + return base, i + + assert expr[i] == "[" + parts = [] + while expr[i] != "]": + part_len = 0 + i += 1 + part, part_len = parseNestedExpr(expr[i:], module) + parts.append(part) + i += part_len + if len(parts) > 1: + return base[tuple(parts)], i + 1 + else: + return base[parts[0]], i + 1 + + def parseExpr(expr, module): + try: + value, len_parsed = parseNestedExpr(expr, module) + assert len_parsed == len( + expr + ), "whole expression was not parsed, falling back to c++ parser" + return value + except Exception: + """ + The python resolver fails in several cases in known unit tests, and is intended + to fall back gracefully to the c++ resolver in general. For example, python 2 style + annotations which are frequent in our unit tests often fail with types e.g. int not + resolvable from the calling frame. 
+ """ + return None + + return lambda expr: parseExpr(expr, lookup_base) + + +def createResolutionCallbackFromFrame(frames_up: int = 0): + """ + Creates a function which, given a string variable name, + returns the value of the variable in the scope of the caller of + the function which called createResolutionCallbackFromFrame (by default). + + This is used to enable access in-scope Python variables inside + TorchScript fragments. + + frames_up is number of additional frames to go up on the stack. + The default value is 0, which correspond to the frame of the caller + of createResolutionCallbackFromFrame. Also for example, if frames_up is set + to 1, then the frame of the caller's caller of createResolutionCallbackFromFrame + will be taken. + + For example, the following program prints 2:: + + def bar(): + cb = createResolutionCallbackFromFrame(1) + print(cb("foo")) + + def baz(): + foo = 2 + bar() + + baz() + """ + frame = inspect.currentframe() + i = 0 + while i < frames_up + 1: + assert frame is not None + frame = frame.f_back + i += 1 + + assert frame is not None + f_locals = frame.f_locals + f_globals = frame.f_globals + + class env: + def __getattr__(self, key): + if key in f_locals: + return f_locals[key] + elif key in f_globals: + return f_globals[key] + elif key in dir(builtins): + return getattr(builtins, key) + + return createResolutionCallbackFromEnv(env()) + + +def get_closure(fn): + """ + Get a dictionary of closed over variables from a function + """ + captures = {} + captures.update(fn.__globals__) + + for index, captured_name in enumerate(fn.__code__.co_freevars): + captures[captured_name] = fn.__closure__[index].cell_contents + + return captures + + +# [local resolution in python] +# Depending on where a variable is defined, and where it is used, we may +# or may not be able to recover its value when recursively compiling a +# script function. Remember in the general case, a module or function is +# first defined and then later scripted. This means we do not have a +# chance to capture the active frames when the function is defined. Hence any +# name resolution has to happen later on the created closure. The way +# python captures type annotations restricts what we can recover. The +# follow example illustrates the different cases: +# +# class MyGlobalClass: +# ... +# def my_local_scope(): +# @torch.jit.script +# class MyClass: +# ... +# @torch.jit.script +# class MyClassUsedAsVar: +# ... +# def eg(x: MyClass, y: MyGlobalClass): +# a_local_capture : Foo +# return MyClassUsedAsVar(x) +# +# MyGlobalClass is defined in the __globals__ dictionary of function +# 'eg', so it is always recoverable. my_local_scope introduces a new local +# variable scope in the function. Classes defined here are only visible as +# local variables. For the case of MyClassUsedAsVar, it is captured +# because it is used as a variable inside the body of the function, and we +# can resolve it using the captures returned from `get_closure`. However, +# the type annotations are not captured by the closure. In Python +# 3.0--3.9, the _value_ of MyClass and MyGlobalClass will be available as +# annotations on `eg``, but starting in Python 4.0, they will represented as +# strings and no longer present. Furthermore, since the body of `eg` does +# not reference those names, they do not appear in the list of closed over +# variables. In Python 2.x, type annotations are in comments, leading to a +# similar situation where their definitions are not available. 
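# A minimal sketch of the capture behaviour described above, reusing the
# hypothetical names from the example (not part of the original source):
#
#     def my_local_scope():
#         class MyClassUsedAsVar:
#             ...
#         def eg(x):
#             return MyClassUsedAsVar(x)   # used in the body -> closed over
#         return eg
#
#     "MyClassUsedAsVar" in get_closure(my_local_scope())   # True
#     # A name that appears only in a type annotation on `eg` never shows up
#     # in co_freevars, so it would not be recoverable this way.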
We anticipate +# that most users will not run into this issue because their modules and +# functions will be defined at a global scope like MyGlobalClass. In cases +# where they are not, it is possible to work around issues by declaring the +# values global in the function. +# In Python 3.9 declaring class as global will make it invisible to +# `inspect.getsource`, see https://bugs.python.org/issue42666 . +# This could be worked around by manualy adding it to `global()` dictionary. + + +def createResolutionCallbackFromClosure(fn): + """ + Create a resolutionCallback by introspecting the function instead of + looking up the stack for the enclosing scope + """ + closure = get_closure(fn) + + class closure_lookup: + # This is a class since `closure` is a dict and it's easier in + # `env_helper` if everything just works with `getattr` calls + def __getattr__(self, key): + if key in closure: + return closure[key] + elif hasattr(typing, key): + return getattr(typing, key) + elif hasattr(builtins, key): + return getattr(builtins, key) + return None + + return createResolutionCallbackFromEnv(closure_lookup()) + + +def can_compile_class(cls) -> bool: + # If any of the functions on a type don't have a code object, this type can't + # be compiled and is probably a builtin / bound from C + if is_ignored_fn(cls): + return False + + # Ignore the following list of built-in classes. + ignored_builtin_classes = (torch.nn.Module, tuple, list, Exception) + if issubclass(cls, ignored_builtin_classes): + return False + + names = cls.__dict__ + fns = [ + getattr(cls, name) + for name in names + if inspect.isroutine(getattr(cls, name, None)) + ] + has_code = [hasattr(fn, "__code__") for fn in fns] + return all(has_code) + + +def get_callable_argument_names(fn) -> List[str]: + """ + Gets names of all POSITIONAL_OR_KEYWORD arguments for callable `fn`. + Returns an empty list when other types of arguments are present. + + This is used by `torch.jit.trace` to assign meaningful argument names to + traced functions and modules. + + Args: + fn: A callable. + Returns: + Argument names: List[str] + """ + # inspect.signature may fail, give up in that case. + try: + callable_signature = inspect.signature(fn) + except Exception: + return [] + + argument_names = [] + for name, param in callable_signature.parameters.items(): + # All four other types of arguments do not map to individual values + # with a keyword as name. + if not param.kind == param.POSITIONAL_OR_KEYWORD: + continue + + argument_names.append(name) + + return argument_names + + +def get_annotation_str(annotation): + """ + Convert an AST node containing a type annotation to the string present in the source + that represents the same annotation. + """ + if isinstance(annotation, ast.Name): + return annotation.id + elif isinstance(annotation, ast.Attribute): + return ".".join([get_annotation_str(annotation.value), annotation.attr]) + elif isinstance(annotation, ast.Subscript): + # In Python3.9+ subscript indicies are not wrapped in ast.Index + subscript_slice = annotation.slice if IS_PY39_PLUS else annotation.slice.value # type: ignore[attr-defined] + return f"{get_annotation_str(annotation.value)}[{get_annotation_str(subscript_slice)}]" + elif isinstance(annotation, ast.Tuple): + return ",".join([get_annotation_str(elt) for elt in annotation.elts]) + elif isinstance(annotation, (ast.Constant, ast.NameConstant)): + return f"{annotation.value}" + + # If an AST node is not handled here, it's probably handled in ScriptTypeParser. 
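    # A small sketch of the handled cases above (hypothetical input, assuming
    # `ast` has been imported as it is at the top of this module):
    #     node = ast.parse("x: List[torch.Tensor]").body[0].annotation
    #     get_annotation_str(node)   # -> "List[torch.Tensor]"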
+ return None + + +def get_type_hint_captures(fn): + """ + Get a dictionary containing type resolution mappings necessary to resolve types + for the literal annotations on 'fn'. These are not considered to be closed-over by fn + and must be obtained separately (e.g. using this function). + + Args: + fn: A callable. + Returns: + A Dict[str, Any] containing a mapping from the literal annotations used on + fn to the Python objects they refer to. + """ + # First, try to get the source of the function. We'll need to parse it to find the actual string names + # that were used to annotate the types, since inspect.signature() will only return the class object that + # the annotation refers to, not the string name. If we can't get the source, simply return an empty dict. + # This may happen in cases where the function is synthesized dynamically at runtime. + src = loader.get_source(fn) + if src is None: + src = inspect.getsource(fn) + + # Gather a dictionary of parameter name -> type, skipping any parameters whose annotated + # types are strings. These are only understood by TorchScript in the context of a type annotation + # that refers to a class in its own definition, but trying to include a mapping for this in the result + # function would cause infinite recursion because the class is currently being compiled. + # In addition, there is logic in ScriptTypeParser to handle this. + signature = inspect.signature(fn) + name_to_type = { + name: parameter.annotation + for name, parameter in signature.parameters.items() + if parameter.annotation is not inspect.Parameter.empty + and not isinstance(parameter.annotation, str) + } + + # Then, get the literal type annotations from the function declaration + # by source inspection. This accounts for the case in which aliases are used + # to annotate the arguments (e.g device_t = torch.device, and then d: device_t). + # frontend.py cannot be used here because it includes _jit_internal, so use ast instead. + a = ast.parse(dedent(src)) + if len(a.body) != 1 or not isinstance(a.body[0], ast.FunctionDef): + raise RuntimeError(f"Expected {fn} to be a function") + f = a.body[0] + + # Prepare a dictionary of source annotation -> type, which will be the final result of this function, + # by using the parsed AST (f) to reconstruct source annotations as strings for each parameter and mapping + # them to the type object corresponding to the annotation via name_to_type using the parameter name. + annotation_to_type = {} + + for arg in f.args.args: + # Get the source type annotation string for this argument if possible. + arg_annotation_str = ( + get_annotation_str(arg.annotation) if arg.annotation else None + ) + + # If the argument has no annotation or get_annotation_str cannot convert it to a string, + # arg_annotation_str will be None. Skip this arg; ScriptTypeParser will probably handle + # this in the latter case. + if arg_annotation_str is None: + continue + + # Insert {arg_annotation_str: type} into annotation_to_type if possible. One reason arg_name may not + # be present in name_to_type is that the annotation itself is a string and not a type object + # (common for self-refential annotations in classes). Once again, let ScriptTypeParser handle this. + arg_name = arg.arg + if arg_name in name_to_type: + annotation_to_type[arg_annotation_str] = name_to_type[arg_name] + + # If there is a valid return annotation, include it in annotation_to_type. 
As with argument annotations, + # the literal annotation has to be convertible to a string by get_annotation_str, and the actual type + # of the annotation cannot be a string. + literal_return_annotation = get_annotation_str(f.returns) + valid_literal_annotation = literal_return_annotation is not None + return_annotation = signature.return_annotation + valid_return_annotation_type = ( + return_annotation is not inspect.Parameter.empty + and not isinstance(return_annotation, str) + ) + if valid_literal_annotation and valid_return_annotation_type: + annotation_to_type[literal_return_annotation] = return_annotation + + return annotation_to_type + + +def createResolutionCallbackForClassMethods(cls): + """ + This looks at all the methods defined in a class and pulls their closed-over + variables into a dictionary and uses that to resolve variables. + """ + # cls is a type here, so `ismethod` is false since the methods on the type + # aren't bound to anything, so Python treats them as regular functions + fns = [ + getattr(cls, name) + for name in cls.__dict__ + if inspect.isroutine(getattr(cls, name)) + ] + # Skip built-ins, as they do not have global scope nor type hints + # Needed to support `enum.Enum` derived classes in Python-3.11 + # That adds `_new_member_` property which is an alias to `__new__` + fns = [fn for fn in fns if not inspect.isbuiltin(fn) and hasattr(fn, "__globals__")] + captures = {} + + for fn in fns: + captures.update(get_closure(fn)) + captures.update(get_type_hint_captures(fn)) + + def lookup_in_class(key): + if key in captures: + return captures[key] + else: + return getattr(builtins, key, None) + + return lookup_in_class + + +def boolean_dispatch( + arg_name, arg_index, default, if_true, if_false, module_name, func_name +): + """ + Dispatches to either of 2 script functions based on a boolean argument. + In TorchScript, the boolean argument must be constant so that the correct + function to use can be determined at compile time. + """ + + def fn(*args, **kwargs): + dispatch_flag = default + if arg_name in kwargs: + dispatch_flag = kwargs[arg_name] + elif arg_index < len(args): + dispatch_flag = args[arg_index] + + if dispatch_flag: + return if_true(*args, **kwargs) + else: + return if_false(*args, **kwargs) + + if if_true.__doc__ is None and if_false.__doc__ is not None: + doc = if_false.__doc__ + if_true.__doc__ = doc + elif if_false.__doc__ is None and if_true.__doc__ is not None: + doc = if_true.__doc__ + if_false.__doc__ = doc + elif if_false.__doc__ is None and if_true.__doc__ is None: + # neither function has a docstring + doc = None + else: + raise RuntimeError("only one function can have a docstring") + fn.__doc__ = doc + + if module_name is not None: + fn.__module__ = module_name + if func_name is not None: + fn.__name__ = func_name + + boolean_dispatched[fn] = { + "if_true": if_true, + "if_false": if_false, + "index": arg_index, + "default": default, + "arg_name": arg_name, + } + return fn + + +class FunctionModifiers: + """ + Used to denote the behavior of a function in TorchScript. See export() and + ignore() for details. 
+ """ + + UNUSED = "unused (ignored and replaced with raising of an exception)" + IGNORE = "ignore (leave as a call to Python, cannot be torch.jit.save'd)" + EXPORT = "export (compile this function even if nothing calls it)" + DEFAULT = "default (compile if called from a exported function / forward)" + COPY_TO_SCRIPT_WRAPPER = ( + "if this method is not scripted, copy the python method onto the scripted model" + ) + _DROP = "_drop (function is fully ignored, declaration can be unscriptable)" + + +def export(fn): + """ + This decorator indicates that a method on an ``nn.Module`` is used as an entry point into a + :class:`ScriptModule` and should be compiled. + + ``forward`` implicitly is assumed to be an entry point, so it does not need this decorator. + Functions and methods called from ``forward`` are compiled as they are seen + by the compiler, so they do not need this decorator either. + + Example (using ``@torch.jit.export`` on a method): + + .. testcode:: + + import torch + import torch.nn as nn + + class MyModule(nn.Module): + def implicitly_compiled_method(self, x): + return x + 99 + + # `forward` is implicitly decorated with `@torch.jit.export`, + # so adding it here would have no effect + def forward(self, x): + return x + 10 + + @torch.jit.export + def another_forward(self, x): + # When the compiler sees this call, it will compile + # `implicitly_compiled_method` + return self.implicitly_compiled_method(x) + + def unused_method(self, x): + return x - 20 + + # `m` will contain compiled methods: + # `forward` + # `another_forward` + # `implicitly_compiled_method` + # `unused_method` will not be compiled since it was not called from + # any compiled methods and wasn't decorated with `@torch.jit.export` + m = torch.jit.script(MyModule()) + """ + fn._torchscript_modifier = FunctionModifiers.EXPORT + return fn + + +def unused(fn): + """ + This decorator indicates to the compiler that a function or method should + be ignored and replaced with the raising of an exception. This allows you + to leave code in your model that is not yet TorchScript compatible and still + export your model. + + Example (using ``@torch.jit.unused`` on a method):: + + import torch + import torch.nn as nn + + class MyModule(nn.Module): + def __init__(self, use_memory_efficient): + super().__init__() + self.use_memory_efficient = use_memory_efficient + + @torch.jit.unused + def memory_efficient(self, x): + import pdb + pdb.set_trace() + return x + 10 + + def forward(self, x): + # Use not-yet-scriptable memory efficient mode + if self.use_memory_efficient: + return self.memory_efficient(x) + else: + return x + 10 + + m = torch.jit.script(MyModule(use_memory_efficient=False)) + m.save("m.pt") + + m = torch.jit.script(MyModule(use_memory_efficient=True)) + # exception raised + m(torch.rand(100)) + """ + if isinstance(fn, property): + prop = fn + setattr( # noqa: B010 + prop.fget, "_torchscript_modifier", FunctionModifiers.UNUSED + ) + + if prop.fset: + setattr( # noqa: B010 + prop.fset, "_torchscript_modifier", FunctionModifiers.UNUSED + ) + + return prop + + fn._torchscript_modifier = FunctionModifiers.UNUSED + return fn + + +# No op context manager from python side +class _IgnoreContextManager(contextlib.AbstractContextManager): + def __init__(self, **kwargs): + pass + + def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: + pass + + +def ignore(drop=False, **kwargs): + """ + This decorator indicates to the compiler that a function or method should + be ignored and left as a Python function. 
This allows you to leave code in + your model that is not yet TorchScript compatible. If called from TorchScript, + ignored functions will dispatch the call to the Python interpreter. Models with ignored + functions cannot be exported; use :func:`@torch.jit.unused ` instead. + + Example (using ``@torch.jit.ignore`` on a method):: + + import torch + import torch.nn as nn + + class MyModule(nn.Module): + @torch.jit.ignore + def debugger(self, x): + import pdb + pdb.set_trace() + + def forward(self, x): + x += 10 + # The compiler would normally try to compile `debugger`, + # but since it is `@ignore`d, it will be left as a call + # to Python + self.debugger(x) + return x + + m = torch.jit.script(MyModule()) + + # Error! The call `debugger` cannot be saved since it calls into Python + m.save("m.pt") + + Example (using ``@torch.jit.ignore(drop=True)`` on a method): + + .. testcode:: + + import torch + import torch.nn as nn + + class MyModule(nn.Module): + @torch.jit.ignore(drop=True) + def training_method(self, x): + import pdb + pdb.set_trace() + + def forward(self, x): + if self.training: + self.training_method(x) + return x + + m = torch.jit.script(MyModule()) + + # This is OK since `training_method` is not saved, the call is replaced + # with a `raise`. + m.save("m.pt") + + .. testcleanup:: + + import os + os.remove('m.pt') + """ + + if callable(drop): + # used without any args, so drop is actually a function + # @torch.jit.ignore + # def fn(...): + fn = drop + fn._torchscript_modifier = FunctionModifiers.IGNORE + return fn + + if not isinstance(drop, bool): + raise RuntimeError( + "Argument to @torch.jit.ignore must be a bool or " + f"a function but got {drop}" + ) + + # for backwards compat + drop_on_export = kwargs.pop("drop_on_export", None) + if drop_on_export: + warnings.warn( + "ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function " + "call on compilation. Use torch.jit.unused now. {}", + category=FutureWarning, + ) + + drop = drop_on_export + elif drop: + warnings.warn( + "ignore(True) has been deprecated. TorchScript will now drop the function " + "call on compilation. Use torch.jit.unused now. {}", + category=FutureWarning, + ) + + def decorator(fn): + if drop: + fn._torchscript_modifier = FunctionModifiers.UNUSED + else: + fn._torchscript_modifier = FunctionModifiers.IGNORE + return fn + + return decorator + + +def _drop(fn): + fn._torchscript_modifier = FunctionModifiers._DROP + return fn + + +def _copy_to_script_wrapper(fn): + fn._torchscript_modifier = FunctionModifiers.COPY_TO_SCRIPT_WRAPPER + return fn + + +def module_has_exports(mod): + for name in dir(mod): + if hasattr(mod, name): + item = getattr(mod, name) + if callable(item): + if get_torchscript_modifier(item) is FunctionModifiers.EXPORT: + return True + return False + + +# WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you +# rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to +# allow JIT'd code to still be covered. 
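# A minimal sketch of how the modifiers set by the decorators above interact
# with the predicates defined below (`f` and `g` are hypothetical functions,
# not part of the original source):
#
#     @unused
#     def f(x):
#         return x
#
#     @ignore(drop=False)
#     def g(x):
#         return x
#
#     get_torchscript_modifier(f) is FunctionModifiers.UNUSED   # True
#     should_drop(f)     # True  -- UNUSED functions are replaced with a raise
#     is_ignored_fn(g)   # True
#     should_drop(g)     # False -- IGNORE leaves the call to Python in place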
+def should_drop(fn) -> bool: + attr = get_torchscript_modifier(fn) + if attr is None: + return False + return attr is FunctionModifiers.UNUSED or attr is FunctionModifiers._DROP + + +def is_ignored_fn(fn) -> bool: + mod = get_torchscript_modifier(fn) + return ( + mod is FunctionModifiers.UNUSED + or mod is FunctionModifiers.IGNORE + or mod is FunctionModifiers._DROP + ) + + +def _is_drop_fn(fn) -> bool: + mod = get_torchscript_modifier(fn) + return mod is FunctionModifiers._DROP + + +def is_static_fn(cls, fn) -> bool: + return isinstance(inspect.getattr_static(cls, fn, default=None), staticmethod) + + +def get_static_fn(cls, fn): + return inspect.getattr_static(cls, fn).__func__ + + +def get_torchscript_modifier(fn): + if not callable(fn): + return None + if hasattr(fn, "__func__"): + fn = fn.__func__ + return getattr(fn, "_torchscript_modifier", FunctionModifiers.DEFAULT) + + +def copy_torchscript_modifier(orig, new) -> None: + attr = get_torchscript_modifier(orig) + if attr is None: + return + new._torchscript_modifier = attr + + +# overloading registration +# overloads get registered in this file, and compiled in torch/jit/__init__.py +# so that they can be imported in nn/functional.py without an import cycle + +# qualified_name => list[overload_functions] +_overloaded_fns: Dict[str, List[Callable]] = {} # noqa: T484 + + +_OVERLOAD_EXAMPLE = """ +Example usage of overload function: +@torch.jit._overload +def my_function(x: type0) -> type0: # decl 1 + pass + +@torch.jit._overload +def my_function(x: type1) -> type1: # decl 2 + pass + +def my_function(x): # implementation + if isinstance(x, type0): + return x + elif isinstance(x, type1): + return x +""" + + +def get_overload_no_implementation_error_message(kind, obj): + sourcelines, file_lineno, filename = get_source_lines_and_file(obj) + return ( + f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make ' + f"sure a definition is provided and defined after all overload declarations.\n" + f'File "{filename}", line {file_lineno}:\n' + + "".join(sourcelines) + + "\n" + + _OVERLOAD_EXAMPLE + ) + + +def _check_overload_body(func): + try: + parsed_def = parse_def(func) + except OSError as e: + # Parsing the function definition can raise an OSError if source is unavailable. + # Since this is just an initial check, just raise a warning if this is the case. + warnings.warn( + f"Unable to retrieve source for @torch.jit._overload function: {func}." 
+ ) + return + + body = parsed_def.ast.body[0].body + + def is_pass(x): + return isinstance(x, ast.Pass) + + def is_ellipsis(x): + return isinstance(x, ast.Expr) and isinstance(x.value, ast.Ellipsis) + + if len(body) != 1 or not (is_pass(body[0]) or is_ellipsis(body[0])): + msg = ( + "Only `pass` statement or `...` can be the body of overload declaration:\n" + ) + msg += "\n".join(parsed_def.source.split("\n")[:3]) + msg += " <- Expecting `pass` or `...` here!\n" + _OVERLOAD_EXAMPLE + raise RuntimeError(msg) + + +def _overload(func): + _check_overload_body(func) + qual_name = _qualified_name(func) + global _overloaded_fns + fn_overload_list = _overloaded_fns.get(qual_name) + if fn_overload_list is None: + fn_overload_list = [] + _overloaded_fns[qual_name] = fn_overload_list + fn_overload_list.append(func) + return func + + +def _get_fn_overloads(qual_name): + return _overloaded_fns.get(qual_name) + + +def _clear_fn_overloads(qual_name) -> None: + del _overloaded_fns[qual_name] + + +def get_class_name_lineno(method) -> Tuple[str, int]: + current_frame = inspect.currentframe() + + # one for the get_class_name call, one for _overload_method call + for i in range(2): + assert ( + current_frame is not None + ) # assert current frame is not an Optional[FrameType] + current_frame = current_frame.f_back + + assert current_frame is not None # same here + class_name = current_frame.f_code.co_name + line_no = current_frame.f_code.co_firstlineno + return class_name, line_no + + +# At the point the decorator is applied to class methods the method +# has no reference to its owning class. _qualified_name would not include +# the class it is defined in, so any methods with the same name in the same file +# would have the same _qualified_name, even if they were defined in different +# classes. This problem only exists in python 2. 
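# For example (hypothetical classes, sketch only), two classes in the same
# module may both declare overloads for a method named `forward` via the
# `_overload_method` decorator defined below:
#
#     class A(torch.nn.Module):
#         @_overload_method
#         def forward(self, x: int) -> int: ...
#
#     class B(torch.nn.Module):
#         @_overload_method
#         def forward(self, x: str) -> str: ...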
+# We get around this problem by looking at the stack frame and identifying +# the class name, and throwing an error whenever overloads are used +# when modules of the same name are in the same file + +# qualified_name => class name => list[overload_functions] +_overloaded_methods: Dict[str, Dict[str, List[Callable]]] = {} # noqa: T484 + + +# (qualified_name, class name) => class_fileno +_overloaded_method_class_fileno: Dict[Tuple[str, str], int] = {} + + +def _overload_method(func): + _check_overload_body(func) + qual_name = _qualified_name(func) + global _overloaded_methods + class_name_map = _overloaded_methods.get(qual_name, None) + if class_name_map is None: + class_name_map = {} + _overloaded_methods[qual_name] = class_name_map + + class_name, line_no = get_class_name_lineno(func) + method_overloads = class_name_map.get(class_name, None) + if method_overloads is None: + method_overloads = [] + class_name_map[class_name] = method_overloads + _overloaded_method_class_fileno[(qual_name, class_name)] = line_no + else: + existing_lineno = _overloaded_method_class_fileno[(qual_name, class_name)] + if existing_lineno != line_no: + raise RuntimeError( + "Cannot currently overload the same method name in two different" + " classes with the same name in the same module" + ) + + method_overloads.append(func) + return func + + +def _get_overloaded_methods(method, mod_class): + # TODO: __name__ not set for submodules in recursive script + if not hasattr(method, "__name__"): + return None + qual_name = _qualified_name(method) + class_name_map = _overloaded_methods.get(qual_name, None) + if class_name_map is None: + return None + overloads = class_name_map.get(mod_class.__name__, None) + if overloads is None: + return None + + method_line_no = get_source_lines_and_file(method)[1] + mod_class_fileno = get_source_lines_and_file(mod_class)[1] + mod_end_fileno = mod_class_fileno + len(get_source_lines_and_file(mod_class)[0]) + if not (method_line_no >= mod_class_fileno and method_line_no <= mod_end_fileno): + raise Exception( + "Overloads are not useable when a module is redeclared within the same file: " + + str(method) + ) + return overloads + + +def is_tuple(ann) -> bool: + if ann is Tuple: + raise_error_container_parameter_missing("Tuple") + + # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule + if not hasattr(ann, "__module__"): + return False + + ann_origin = get_origin(ann) + if IS_PY39_PLUS and ann.__module__ == "builtins" and ann_origin is tuple: + return True + return ann.__module__ == "typing" and (ann_origin is Tuple or ann_origin is tuple) + + +def is_list(ann) -> bool: + if ann is List: + raise_error_container_parameter_missing("List") + + if not hasattr(ann, "__module__"): + return False + + ann_origin = get_origin(ann) + if IS_PY39_PLUS and ann.__module__ == "builtins" and ann_origin is list: + return True + return ann.__module__ == "typing" and (ann_origin is List or ann_origin is list) + + +def is_dict(ann) -> bool: + if ann is Dict: + raise_error_container_parameter_missing("Dict") + + if not hasattr(ann, "__module__"): + return False + + ann_origin = get_origin(ann) + if IS_PY39_PLUS and ann.__module__ == "builtins" and ann_origin is dict: + return True + return ann.__module__ == "typing" and (ann_origin is Dict or ann_origin is dict) + + +def is_union(ann): + if ann is Union: + raise_error_container_parameter_missing("Union") + + return isinstance(ann, BuiltinUnionType) or ( + hasattr(ann, "__module__") + and ann.__module__ == "typing" + and 
(get_origin(ann) is Union) + ) + + +def is_optional(ann): + if ann is Optional: + raise_error_container_parameter_missing("Optional") + + def is_optional_as_optional(ann): + return ( + hasattr(ann, "__module__") + and ann.__module__ == "typing" + and (get_origin(ann) is Optional) + ) + + def is_union_as_optional(ann): + ann_args = get_args(ann) + return len(ann_args) == 2 and (None in ann_args or type(None) in ann_args) + + return is_optional_as_optional(ann) or (is_union(ann) and is_union_as_optional(ann)) + + +def is_future(ann) -> bool: + if ann is Future: + raise RuntimeError( + "Attempted to use Future without a " + "contained type. Please add a contained type, e.g. " + "Future[int]" + ) + return get_origin(ann) is Future + + +def is_await(ann) -> bool: + if ann is _Await: + return True + return get_origin(ann) is _Await + + +if torch.distributed.rpc.is_available(): + from torch._C._distributed_rpc import PyRRef + from torch.distributed.rpc import RRef + + def is_rref(ann) -> bool: + if ann is RRef: + raise RuntimeError( + "Attempted to use RRef without a " + "contained type. Please add a contained type, e.g. " + "RRef[int]" + ) + return get_origin(ann) is RRef + + def is_rref_instance(obj) -> bool: + return isinstance(obj, PyRRef) + +else: + + def is_rref_instance(obj) -> bool: + # If the RPC module doesn't exist then RRefs don't exist either. + return False + + +def is_final(ann) -> bool: + return ( + hasattr(ann, "__module__") + and ann.__module__ in {"typing", "typing_extensions"} + and (get_origin(ann) is Final or isinstance(ann, type(Final))) + ) + + +# allows BroadcastingList instance to be subscriptable +class BroadcastingListCls: + def __getitem__(self, types): + return + + +# mypy doesn't support parameters on types, so we have to explicitly type each +# list size +BroadcastingList1 = BroadcastingListCls() +for i in range(2, 7): + globals()[f"BroadcastingList{i}"] = BroadcastingList1 + + +def is_scripting() -> bool: + r""" + Function that returns True when in compilation and False otherwise. This + is useful especially with the @unused decorator to leave code in your + model that is not yet TorchScript compatible. + .. testcode:: + + import torch + + @torch.jit.unused + def unsupported_linear_op(x): + return x + + def linear(x): + if torch.jit.is_scripting(): + return torch.linear(x) + else: + return unsupported_linear_op(x) + """ + return False + + +# Retrieves a fully-qualified name (module hierarchy + classname) for a given obj. +def _qualified_name(obj, mangle_name=True) -> str: + # This special case allows us to override the qualified name on a type. + # It's currently used in conjunction with tracing, where we create a + # fake module to filter only supported attributes. However, since this + # new type is defined as a local class, we need a mechanism to override + # its qualname so it appears correctly in the TorchScript system. This, + # we set '_jit_override_qualname' with the original traced module's + # qualified name, which is picked up here + if hasattr(obj, "_jit_override_qualname"): + return obj._jit_override_qualname + # short-circuit in cases where the object already has a known qualified name + if isinstance(obj, torch._C.ScriptFunction): + return obj.qualified_name + + if getattr(obj, "__name__", None): + name = obj.__name__ + # Enum classes do not have `__name__` attr, instead they have `name`. 
+ elif isinstance(obj, enum.Enum): + name = obj.name + else: + raise RuntimeError("Could not get name of python class object") + + if name == "": + name = "_lambda" # make name a valid identifier + + module_name = obj.__module__ + + # If the module is actually a torchbind module, then we should short circuit + if module_name == "torch._classes": + return obj.qualified_name + + # The Python docs are very clear that `__module__` can be None, but I can't + # figure out when it actually would be. + if module_name is None: + raise RuntimeError( + f"Could not get qualified name for class '{name}': " + "__module__ can't be None." + ) + + # if getattr(sys.modules[module_name], name) is not obj: + # raise RuntimeError(f"Could not get qualified name for class '{name}': " + # f"the attr {name} on module {module_name} is not the class") + + # torch.package and TorchScript have separate mangling schemes to avoid + # name collisions from multiple packages. To avoid them interfering with + # each other, normalize the package manging here. + if package_mangling.is_mangled(module_name): + module_name = module_name.replace("<", "_") + module_name = module_name.replace(">", "_") + + # The PythonExceptionValue C++ class in torch/csrc/jit/python/python_sugared_value.h + # does not need mangle the python class name. + if mangle_name: + # __main__ is a builtin module, so rewrite it to "__torch__". + if module_name == "__main__": + module_name = "__torch__" + else: + # Everything else gets a "__torch__" prefix to avoid name collisions + # with the names of user values. + module_name = "__torch__." + module_name + + if "." in name: + raise RuntimeError( + f"Could not get qualified name for class '{name}': " + f"'{name}' is not a valid identifier" + ) + + return module_name + "." + name + + +def _try_get_dispatched_fn(fn): + if not callable(fn): + return None + return boolean_dispatched.get(fn) + + +def _get_named_tuple_properties( + obj, loc: Optional[torch._C._jit_tree_views.SourceRange] = None, rcb=None +): + if loc is None: + loc = fake_range() + + assert issubclass(obj, tuple) and hasattr(obj, "_fields") + if hasattr(obj, "_field_defaults"): + defaults = [ + obj._field_defaults[field] + for field in obj._fields + if field in obj._field_defaults + ] + else: + defaults = [] + # In 3.10 recommended way to get annotations is to call `inspect.get_annotations` function + # Also, annotations from base class are not inherited so they need to be queried explicitly + if sys.version_info[:2] < (3, 10): + obj_annotations = getattr(obj, "__annotations__", {}) + else: + obj_annotations = inspect.get_annotations(obj) + if len(obj_annotations) == 0 and hasattr(obj, "__base__"): + obj_annotations = inspect.get_annotations(obj.__base__) + + annotations = [] + for field in obj._fields: + if field in obj_annotations: + field_type = obj_annotations[field] + # [Note: ForwardRef annotations in NamedTuple attributes] + # NamedTuple types are slightly different from normal types. + # + # Normally, annotations are evaluted like this (during jit.script): + # 1. Load strings of python code into c++ and parse. + # 2. Get annotations as strings + # 3. Use the PythonResolver's resolution callback (rcb) to convert + # the string into a python object + # 4. We call into annotations.py:ann_to_type to convert python obj + # from step 3 into a type that torchscript understands. + # + # NamedTuples are more complicated, because it has sub-types. 
+ # Normally, once we have the NamedTuple type object from #3, + # we can just look at the annotation literal values and use + # ann_to_type directly on them. + # + # But sometimes, users will annotate with string literals, e.g. + # x: 'int' + # This also happens with PEP563 (from __forward__ import annotations) + # + # These annotations appear in the annotation dict as ForwardRef('int'). + # + # Then, we need to convert the string into a python object. This + # requires having local context for custom objects or imported types. + # rcb() is what gives us this. So, we plumb rcb through the stack so + # it can be used in this context for the if block below. + # + # FAQ: + # - Why do we need this special handling for NamedTuple but string + # annotations work fine for normal types? Normally, we parse the + # string directly and then call rcb() directly from C++. + # - Why not use ForwardRef._evaluate? For that, we need globals() + # and locals() for the local context where the NamedTuple was defined. + # rcb is what lets us look up into these. So, basically rcb does the + # hard work for us. + if isinstance(field_type, ForwardRef) and rcb is not None: + rcb_type = rcb(field_type.__forward_arg__) + # rcb returns None if it can't find anything. + if rcb_type is None: + raise ValueError( + f"Unknown type annotation: '{field_type}' in NamedTuple {obj.__name__}." + f" Likely due to partial support for ForwardRef parameters in NamedTuples, see #95858." + f" Issue occurred at {loc.highlight()}" + ) + field_type = rcb_type + the_type = torch.jit.annotations.ann_to_type(field_type, loc, rcb) + annotations.append(the_type) + else: + annotations.append(torch._C.TensorType.getInferred()) + return type(obj).__name__, obj._fields, annotations, defaults + + +def _create_named_tuple( + t, unqual_name: str, field_names: List[str], defaults: Tuple[Any, ...] +): + TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc] + return TupleType(*t) + + +@contextlib.contextmanager +def _disable_emit_hooks(): + hooks = torch._C._jit_get_emit_hooks() + torch._C._jit_set_emit_hooks(None, None) + try: + yield + finally: + torch._C._jit_set_emit_hooks(hooks[0], hooks[1]) + + +def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None: # noqa: F811 + def __enter__(self) -> None: + self.hooks = torch._C._jit_get_emit_hooks() + torch._C._jit_set_emit_hooks(None, None) + + def __exit__(self, *args) -> None: + torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1]) + + +def _is_exception(obj) -> bool: + if not inspect.isclass(obj): + return False + return issubclass(obj, Exception) + + +def raise_error_container_parameter_missing(target_type) -> None: + if target_type == "Dict": + raise RuntimeError( + "Attempted to use Dict without " + "contained types. Please add contained type, e.g. " + "Dict[int, int]" + ) + raise RuntimeError( + f"Attempted to use {target_type} without a " + "contained type. Please add a contained type, e.g. 
" + f"{target_type}[int]" + ) + + +def check_args_exist(target_type) -> None: + if target_type is List or target_type is list: + raise_error_container_parameter_missing("List") + elif target_type is Tuple or target_type is tuple: + raise_error_container_parameter_missing("Tuple") + elif target_type is Dict or target_type is dict: + raise_error_container_parameter_missing("Dict") + elif target_type is None or target_type is Optional: + raise_error_container_parameter_missing("Optional") + + +def check_empty_containers(obj) -> None: + if obj == [] or obj == {} or obj == (): + warnings.warn( + "The inner type of a container is lost when " + "calling torch.jit.isinstance in eager mode. For " + "example, List[int] would become list and " + "therefore falsely return True for List[float] or" + " List[str]." + ) + + +# supports List/Dict/Tuple and Optional types +# TODO support future +def container_checker(obj, target_type) -> bool: + origin_type = get_origin(target_type) + check_args_exist(target_type) + if origin_type is None: + return False + elif origin_type is list or origin_type is List: + check_empty_containers(obj) + if not isinstance(obj, list): + return False + arg_type = get_args(target_type)[0] + arg_origin = get_origin(arg_type) + for el in obj: + # check if nested container, ex: List[List[str]] + if arg_origin: # processes nested container, ex: List[List[str]] + if not container_checker(el, arg_type): + return False + elif not isinstance(el, arg_type): + return False + return True + elif origin_type is Dict or origin_type is dict: + check_empty_containers(obj) + if not isinstance(obj, dict): + return False + key_type = get_args(target_type)[0] + val_type = get_args(target_type)[1] + for key, val in obj.items(): + # check if keys are of right type + if not isinstance(key, key_type): + return False + val_origin = get_origin(val_type) + if val_origin: + if not container_checker(val, val_type): + return False + elif not isinstance(val, val_type): + return False + return True + elif origin_type is Tuple or origin_type is tuple: + check_empty_containers(obj) + if not isinstance(obj, tuple): + return False + arg_types = get_args(target_type) + if len(obj) != len(arg_types): + return False + for el, el_type in zip(obj, arg_types): + el_origin = get_origin(el_type) + if el_origin: + if not container_checker(el, el_type): + return False + elif not isinstance(el, el_type): + return False + return True + elif origin_type is Union or issubclass( + origin_type, BuiltinUnionType + ): # also handles Optional + if obj is None: # check before recursion because None is always fine + return True + inner_types = get_args(target_type) + for t in inner_types: + t_origin = get_origin(t) + if t_origin: + return container_checker(obj, t) + elif isinstance(obj, t): + return True + return False + + +def _isinstance(obj, target_type) -> bool: + if isinstance(target_type, collections.abc.Container): + if not isinstance(target_type, tuple): + raise RuntimeError( + "The second argument to " + "`torch.jit.isinstance` must be a type " + "or a tuple of types" + ) + for t_type in target_type: + if _isinstance(obj, t_type): + return True + return False + + origin_type = get_origin(target_type) + if origin_type: + return container_checker(obj, target_type) + + # Check to handle non-typed optional origin returns as none instead + # of as optional in 3.7-3.8 + check_args_exist(target_type) + + # handle non-containers + return isinstance(obj, target_type) + + +class _TensorExtractor(pickle.Pickler): + def __init__(self, 
*args, tensors: List[torch.Tensor], **kwargs): + super().__init__(*args, **kwargs) + self.tensors = tensors + + def persistent_id(self, obj): + if isinstance(obj, torch.Tensor): + self.tensors.append(obj) + return "" + # Since we just want to extract tensors, we don't mind if an object is + # unpicklable if it doesn't contain tensors, as we can just ignore/skip + # it. To play it safe, we only do so for common objects that we're sure + # don't contain tensors. Feel free to add new types here. Note also that + # even if a type isn't listed here this won't block users, since thet + # can just add a __getstate__ or __reduce__ method to their class. + if isinstance(obj, LockType): + return "" + # Futures and RRefs don't technically contain a value, they just offer + # the means to access a value. + if isinstance(obj, CFuture) or is_rref_instance(obj): + return "" + if isinstance(obj, CAwait): + return "" + if isinstance(obj, torch.cuda.Event): + return "" + if isinstance(obj, threading.Thread): + return "" + return None + + +def _extract_tensors(obj): + r""" + This function is exclusively called from C++. + See ``torch/csrc/jit/python/python_ivalue.h``. + + It extracts the tensors contained in the given object, through pickling. + """ + tensors: List[torch.Tensor] = [] + extractor = _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors) + extractor.dump(obj) + return tensors + + +# In Python-3.11+ typed enums (i.e. IntEnum for example) retain number of base class methods in subclass +# that were previously dropped. To preserve the behavior, explicitly drop them there + +if sys.version_info > (3, 10): + _drop(enum.Enum.__new__) + _drop(enum.Enum.__format__) + _drop(enum.Enum.__repr__) + _drop(enum.Enum.__str__) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_lobpcg.py b/llmeval-env/lib/python3.10/site-packages/torch/_lobpcg.py new file mode 100644 index 0000000000000000000000000000000000000000..6ca1e7294217ab294b245202553c30759adbfc68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_lobpcg.py @@ -0,0 +1,1167 @@ +"""Locally Optimal Block Preconditioned Conjugate Gradient methods. +""" +# Author: Pearu Peterson +# Created: February 2020 + +from typing import Dict, Optional, Tuple + +import torch +from torch import Tensor +from . import _linalg_utils as _utils +from .overrides import handle_torch_function, has_torch_function + + +__all__ = ["lobpcg"] + + +def _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U): + # compute F, such that F_ij = (d_j - d_i)^{-1} for i != j, F_ii = 0 + F = D.unsqueeze(-2) - D.unsqueeze(-1) + F.diagonal(dim1=-2, dim2=-1).fill_(float("inf")) + F.pow_(-1) + + # A.grad = U (D.grad + (U^T U.grad * F)) U^T + Ut = U.mT.contiguous() + res = torch.matmul( + U, torch.matmul(torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F, Ut) + ) + + return res + + +def _polynomial_coefficients_given_roots(roots): + """ + Given the `roots` of a polynomial, find the polynomial's coefficients. + + If roots = (r_1, ..., r_n), then the method returns + coefficients (a_0, a_1, ..., a_n (== 1)) so that + p(x) = (x - r_1) * ... * (x - r_n) + = x^n + a_{n-1} * x^{n-1} + ... a_1 * x_1 + a_0 + + Note: for better performance requires writing a low-level kernel + """ + poly_order = roots.shape[-1] + poly_coeffs_shape = list(roots.shape) + # we assume p(x) = x^n + a_{n-1} * x^{n-1} + ... 
+ a_1 * x + a_0, + # so poly_coeffs = {a_0, ..., a_n, a_{n+1}(== 1)}, + # but we insert one extra coefficient to enable better vectorization below + poly_coeffs_shape[-1] += 2 + poly_coeffs = roots.new_zeros(poly_coeffs_shape) + poly_coeffs[..., 0] = 1 + poly_coeffs[..., -1] = 1 + + # perform the Horner's rule + for i in range(1, poly_order + 1): + # note that it is computationally hard to compute backward for this method, + # because then given the coefficients it would require finding the roots and/or + # calculating the sensitivity based on the Vieta's theorem. + # So the code below tries to circumvent the explicit root finding by series + # of operations on memory copies imitating the Horner's method. + # The memory copies are required to construct nodes in the computational graph + # by exploting the explicit (not in-place, separate node for each step) + # recursion of the Horner's method. + # Needs more memory, O(... * k^2), but with only O(... * k^2) complexity. + poly_coeffs_new = poly_coeffs.clone() if roots.requires_grad else poly_coeffs + out = poly_coeffs_new.narrow(-1, poly_order - i, i + 1) + out -= roots.narrow(-1, i - 1, 1) * poly_coeffs.narrow( + -1, poly_order - i + 1, i + 1 + ) + poly_coeffs = poly_coeffs_new + + return poly_coeffs.narrow(-1, 1, poly_order + 1) + + +def _polynomial_value(poly, x, zero_power, transition): + """ + A generic method for computing poly(x) using the Horner's rule. + + Args: + poly (Tensor): the (possibly batched) 1D Tensor representing + polynomial coefficients such that + poly[..., i] = (a_{i_0}, ..., a{i_n} (==1)), and + poly(x) = poly[..., 0] * zero_power + ... + poly[..., n] * x^n + + x (Tensor): the value (possible batched) to evalate the polynomial `poly` at. + + zero_power (Tensor): the representation of `x^0`. It is application-specific. + + transition (Callable): the function that accepts some intermediate result `int_val`, + the `x` and a specific polynomial coefficient + `poly[..., k]` for some iteration `k`. + It basically performs one iteration of the Horner's rule + defined as `x * int_val + poly[..., k] * zero_power`. + Note that `zero_power` is not a parameter, + because the step `+ poly[..., k] * zero_power` depends on `x`, + whether it is a vector, a matrix, or something else, so this + functionality is delegated to the user. + """ + + res = zero_power.clone() + for k in range(poly.size(-1) - 2, -1, -1): + res = transition(res, x, poly[..., k]) + return res + + +def _matrix_polynomial_value(poly, x, zero_power=None): + """ + Evaluates `poly(x)` for the (batched) matrix input `x`. + Check out `_polynomial_value` function for more details. + """ + + # matrix-aware Horner's rule iteration + def transition(curr_poly_val, x, poly_coeff): + res = x.matmul(curr_poly_val) + res.diagonal(dim1=-2, dim2=-1).add_(poly_coeff.unsqueeze(-1)) + return res + + if zero_power is None: + zero_power = torch.eye( + x.size(-1), x.size(-1), dtype=x.dtype, device=x.device + ).view(*([1] * len(list(x.shape[:-2]))), x.size(-1), x.size(-1)) + + return _polynomial_value(poly, x, zero_power, transition) + + +def _vector_polynomial_value(poly, x, zero_power=None): + """ + Evaluates `poly(x)` for the (batched) vector input `x`. + Check out `_polynomial_value` function for more details. 
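+
+    A minimal illustrative sketch (the coefficient and input values below are chosen
+    purely for demonstration; recall that the leading coefficient is implicitly taken
+    to be 1, so the last entry of ``poly`` is never read by the Horner iteration)::
+
+        >>> # p(x) = x**2 + 2*x + 3, coefficients ordered as (a_0, a_1, a_2 == 1)
+        >>> poly = torch.tensor([3.0, 2.0, 1.0])
+        >>> x = torch.tensor([0.0, 1.0, 2.0])
+        >>> _vector_polynomial_value(poly, x)
+        tensor([ 3.,  6., 11.])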
+ """ + + # vector-aware Horner's rule iteration + def transition(curr_poly_val, x, poly_coeff): + res = torch.addcmul(poly_coeff.unsqueeze(-1), x, curr_poly_val) + return res + + if zero_power is None: + zero_power = x.new_ones(1).expand(x.shape) + + return _polynomial_value(poly, x, zero_power, transition) + + +def _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest): + # compute a projection operator onto an orthogonal subspace spanned by the + # columns of U defined as (I - UU^T) + Ut = U.mT.contiguous() + proj_U_ortho = -U.matmul(Ut) + proj_U_ortho.diagonal(dim1=-2, dim2=-1).add_(1) + + # compute U_ortho, a basis for the orthogonal complement to the span(U), + # by projecting a random [..., m, m - k] matrix onto the subspace spanned + # by the columns of U. + # + # fix generator for determinism + gen = torch.Generator(A.device) + + # orthogonal complement to the span(U) + U_ortho = proj_U_ortho.matmul( + torch.randn( + (*A.shape[:-1], A.size(-1) - D.size(-1)), + dtype=A.dtype, + device=A.device, + generator=gen, + ) + ) + U_ortho_t = U_ortho.mT.contiguous() + + # compute the coefficients of the characteristic polynomial of the tensor D. + # Note that D is diagonal, so the diagonal elements are exactly the roots + # of the characteristic polynomial. + chr_poly_D = _polynomial_coefficients_given_roots(D) + + # the code belows finds the explicit solution to the Sylvester equation + # U_ortho^T A U_ortho dX - dX D = -U_ortho^T A U + # and incorporates it into the whole gradient stored in the `res` variable. + # + # Equivalent to the following naive implementation: + # res = A.new_zeros(A.shape) + # p_res = A.new_zeros(*A.shape[:-1], D.size(-1)) + # for k in range(1, chr_poly_D.size(-1)): + # p_res.zero_() + # for i in range(0, k): + # p_res += (A.matrix_power(k - 1 - i) @ U_grad) * D.pow(i).unsqueeze(-2) + # res -= chr_poly_D[k] * (U_ortho @ poly_D_at_A.inverse() @ U_ortho_t @ p_res @ U.t()) + # + # Note that dX is a differential, so the gradient contribution comes from the backward sensitivity + # Tr(f(U_grad, D_grad, A, U, D)^T dX) = Tr(g(U_grad, A, U, D)^T dA) for some functions f and g, + # and we need to compute g(U_grad, A, U, D) + # + # The naive implementation is based on the paper + # Hu, Qingxi, and Daizhan Cheng. + # "The polynomial solution to the Sylvester matrix equation." + # Applied mathematics letters 19.9 (2006): 859-864. + # + # We can modify the computation of `p_res` from above in a more efficient way + # p_res = U_grad * (chr_poly_D[1] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k)).unsqueeze(-2) + # + A U_grad * (chr_poly_D[2] * D.pow(0) + ... + chr_poly_D[k] * D.pow(k - 1)).unsqueeze(-2) + # + ... 
+ # + A.matrix_power(k - 1) U_grad * chr_poly_D[k] + # Note that this saves us from redundant matrix products with A (elimination of matrix_power) + U_grad_projected = U_grad + series_acc = U_grad_projected.new_zeros(U_grad_projected.shape) + for k in range(1, chr_poly_D.size(-1)): + poly_D = _vector_polynomial_value(chr_poly_D[..., k:], D) + series_acc += U_grad_projected * poly_D.unsqueeze(-2) + U_grad_projected = A.matmul(U_grad_projected) + + # compute chr_poly_D(A) which essentially is: + # + # chr_poly_D_at_A = A.new_zeros(A.shape) + # for k in range(chr_poly_D.size(-1)): + # chr_poly_D_at_A += chr_poly_D[k] * A.matrix_power(k) + # + # Note, however, for better performance we use the Horner's rule + chr_poly_D_at_A = _matrix_polynomial_value(chr_poly_D, A) + + # compute the action of `chr_poly_D_at_A` restricted to U_ortho_t + chr_poly_D_at_A_to_U_ortho = torch.matmul( + U_ortho_t, torch.matmul(chr_poly_D_at_A, U_ortho) + ) + # we need to invert 'chr_poly_D_at_A_to_U_ortho`, for that we compute its + # Cholesky decomposition and then use `torch.cholesky_solve` for better stability. + # Cholesky decomposition requires the input to be positive-definite. + # Note that `chr_poly_D_at_A_to_U_ortho` is positive-definite if + # 1. `largest` == False, or + # 2. `largest` == True and `k` is even + # under the assumption that `A` has distinct eigenvalues. + # + # check if `chr_poly_D_at_A_to_U_ortho` is positive-definite or negative-definite + chr_poly_D_at_A_to_U_ortho_sign = -1 if (largest and (k % 2 == 1)) else +1 + chr_poly_D_at_A_to_U_ortho_L = torch.linalg.cholesky( + chr_poly_D_at_A_to_U_ortho_sign * chr_poly_D_at_A_to_U_ortho + ) + + # compute the gradient part in span(U) + res = _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U) + + # incorporate the Sylvester equation solution into the full gradient + # it resides in span(U_ortho) + res -= U_ortho.matmul( + chr_poly_D_at_A_to_U_ortho_sign + * torch.cholesky_solve( + U_ortho_t.matmul(series_acc), chr_poly_D_at_A_to_U_ortho_L + ) + ).matmul(Ut) + + return res + + +def _symeig_backward(D_grad, U_grad, A, D, U, largest): + # if `U` is square, then the columns of `U` is a complete eigenspace + if U.size(-1) == U.size(-2): + return _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U) + else: + return _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest) + + +class LOBPCGAutogradFunction(torch.autograd.Function): + @staticmethod + def forward( # type: ignore[override] + ctx, + A: Tensor, + k: Optional[int] = None, + B: Optional[Tensor] = None, + X: Optional[Tensor] = None, + n: Optional[int] = None, + iK: Optional[Tensor] = None, + niter: Optional[int] = None, + tol: Optional[float] = None, + largest: Optional[bool] = None, + method: Optional[str] = None, + tracker: None = None, + ortho_iparams: Optional[Dict[str, int]] = None, + ortho_fparams: Optional[Dict[str, float]] = None, + ortho_bparams: Optional[Dict[str, bool]] = None, + ) -> Tuple[Tensor, Tensor]: + # makes sure that input is contiguous for efficiency. + # Note: autograd does not support dense gradients for sparse input yet. 
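+        # (Sparse inputs are left as-is here; note that the `backward` method
+        # below rejects sparse and complex inputs with explicit error messages.)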
+ A = A.contiguous() if (not A.is_sparse) else A + if B is not None: + B = B.contiguous() if (not B.is_sparse) else B + + D, U = _lobpcg( + A, + k, + B, + X, + n, + iK, + niter, + tol, + largest, + method, + tracker, + ortho_iparams, + ortho_fparams, + ortho_bparams, + ) + + ctx.save_for_backward(A, B, D, U) + ctx.largest = largest + + return D, U + + @staticmethod + def backward(ctx, D_grad, U_grad): + A_grad = B_grad = None + grads = [None] * 14 + + A, B, D, U = ctx.saved_tensors + largest = ctx.largest + + # lobpcg.backward has some limitations. Checks for unsupported input + if A.is_sparse or (B is not None and B.is_sparse and ctx.needs_input_grad[2]): + raise ValueError( + "lobpcg.backward does not support sparse input yet." + "Note that lobpcg.forward does though." + ) + if ( + A.dtype in (torch.complex64, torch.complex128) + or B is not None + and B.dtype in (torch.complex64, torch.complex128) + ): + raise ValueError( + "lobpcg.backward does not support complex input yet." + "Note that lobpcg.forward does though." + ) + if B is not None: + raise ValueError( + "lobpcg.backward does not support backward with B != I yet." + ) + + if largest is None: + largest = True + + # symeig backward + if B is None: + A_grad = _symeig_backward(D_grad, U_grad, A, D, U, largest) + + # A has index 0 + grads[0] = A_grad + # B has index 2 + grads[2] = B_grad + return tuple(grads) + + +def lobpcg( + A: Tensor, + k: Optional[int] = None, + B: Optional[Tensor] = None, + X: Optional[Tensor] = None, + n: Optional[int] = None, + iK: Optional[Tensor] = None, + niter: Optional[int] = None, + tol: Optional[float] = None, + largest: Optional[bool] = None, + method: Optional[str] = None, + tracker: None = None, + ortho_iparams: Optional[Dict[str, int]] = None, + ortho_fparams: Optional[Dict[str, float]] = None, + ortho_bparams: Optional[Dict[str, bool]] = None, +) -> Tuple[Tensor, Tensor]: + """Find the k largest (or smallest) eigenvalues and the corresponding + eigenvectors of a symmetric positive definite generalized + eigenvalue problem using matrix-free LOBPCG methods. + + This function is a front-end to the following LOBPCG algorithms + selectable via `method` argument: + + `method="basic"` - the LOBPCG method introduced by Andrew + Knyazev, see [Knyazev2001]. A less robust method, may fail when + Cholesky is applied to singular input. + + `method="ortho"` - the LOBPCG method with orthogonal basis + selection [StathopoulosEtal2002]. A robust method. + + Supported inputs are dense, sparse, and batches of dense matrices. + + .. note:: In general, the basic method spends least time per + iteration. However, the robust methods converge much faster and + are more stable. So, the usage of the basic method is generally + not recommended but there exist cases where the usage of the + basic method may be preferred. + + .. warning:: The backward method does not support sparse and complex inputs. + It works only when `B` is not provided (i.e. `B == None`). + We are actively working on extensions, and the details of + the algorithms are going to be published promptly. + + .. warning:: While it is assumed that `A` is symmetric, `A.grad` is not. + To make sure that `A.grad` is symmetric, so that `A - t * A.grad` is symmetric + in first-order optimization routines, prior to running `lobpcg` + we do the following symmetrization map: `A -> (A + A.t()) / 2`. + The map is performed only when the `A` requires gradients. 
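+
+    Example (an illustrative sketch only; the random symmetric positive definite
+    matrix below is made up for demonstration and is not taken from the library's
+    tests)::
+
+        >>> A = torch.randn(50, 50, dtype=torch.float64)
+        >>> A = A @ A.mT + 50 * torch.eye(50, dtype=torch.float64)  # symmetric positive definite
+        >>> E, X = torch.lobpcg(A, k=3, largest=True)
+        >>> E.shape, X.shape
+        (torch.Size([3]), torch.Size([50, 3]))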
+ + Args: + + A (Tensor): the input tensor of size :math:`(*, m, m)` + + B (Tensor, optional): the input tensor of size :math:`(*, m, + m)`. When not specified, `B` is interpreted as + identity matrix. + + X (tensor, optional): the input tensor of size :math:`(*, m, n)` + where `k <= n <= m`. When specified, it is used as + initial approximation of eigenvectors. X must be a + dense tensor. + + iK (tensor, optional): the input tensor of size :math:`(*, m, + m)`. When specified, it will be used as preconditioner. + + k (integer, optional): the number of requested + eigenpairs. Default is the number of :math:`X` + columns (when specified) or `1`. + + n (integer, optional): if :math:`X` is not specified then `n` + specifies the size of the generated random + approximation of eigenvectors. Default value for `n` + is `k`. If :math:`X` is specified, the value of `n` + (when specified) must be the number of :math:`X` + columns. + + tol (float, optional): residual tolerance for stopping + criterion. Default is `feps ** 0.5` where `feps` is + smallest non-zero floating-point number of the given + input tensor `A` data type. + + largest (bool, optional): when True, solve the eigenproblem for + the largest eigenvalues. Otherwise, solve the + eigenproblem for smallest eigenvalues. Default is + `True`. + + method (str, optional): select LOBPCG method. See the + description of the function above. Default is + "ortho". + + niter (int, optional): maximum number of iterations. When + reached, the iteration process is hard-stopped and + the current approximation of eigenpairs is returned. + For infinite iteration but until convergence criteria + is met, use `-1`. + + tracker (callable, optional) : a function for tracing the + iteration process. When specified, it is called at + each iteration step with LOBPCG instance as an + argument. The LOBPCG instance holds the full state of + the iteration process in the following attributes: + + `iparams`, `fparams`, `bparams` - dictionaries of + integer, float, and boolean valued input + parameters, respectively + + `ivars`, `fvars`, `bvars`, `tvars` - dictionaries + of integer, float, boolean, and Tensor valued + iteration variables, respectively. + + `A`, `B`, `iK` - input Tensor arguments. + + `E`, `X`, `S`, `R` - iteration Tensor variables. + + For instance: + + `ivars["istep"]` - the current iteration step + `X` - the current approximation of eigenvectors + `E` - the current approximation of eigenvalues + `R` - the current residual + `ivars["converged_count"]` - the current number of converged eigenpairs + `tvars["rerr"]` - the current state of convergence criteria + + Note that when `tracker` stores Tensor objects from + the LOBPCG instance, it must make copies of these. + + If `tracker` sets `bvars["force_stop"] = True`, the + iteration process will be hard-stopped. + + ortho_iparams, ortho_fparams, ortho_bparams (dict, optional): + various parameters to LOBPCG algorithm when using + `method="ortho"`. + + Returns: + + E (Tensor): tensor of eigenvalues of size :math:`(*, k)` + + X (Tensor): tensor of eigenvectors of size :math:`(*, m, k)` + + References: + + [Knyazev2001] Andrew V. Knyazev. (2001) Toward the Optimal + Preconditioned Eigensolver: Locally Optimal Block Preconditioned + Conjugate Gradient Method. SIAM J. Sci. Comput., 23(2), + 517-541. (25 pages) + https://epubs.siam.org/doi/abs/10.1137/S1064827500366124 + + [StathopoulosEtal2002] Andreas Stathopoulos and Kesheng + Wu. 
(2002) A Block Orthogonalization Procedure with Constant + Synchronization Requirements. SIAM J. Sci. Comput., 23(6), + 2165-2182. (18 pages) + https://epubs.siam.org/doi/10.1137/S1064827500370883 + + [DuerschEtal2018] Jed A. Duersch, Meiyue Shao, Chao Yang, Ming + Gu. (2018) A Robust and Efficient Implementation of LOBPCG. + SIAM J. Sci. Comput., 40(5), C655-C676. (22 pages) + https://epubs.siam.org/doi/abs/10.1137/17M1129830 + + """ + + if not torch.jit.is_scripting(): + tensor_ops = (A, B, X, iK) + if not set(map(type, tensor_ops)).issubset( + (torch.Tensor, type(None)) + ) and has_torch_function(tensor_ops): + return handle_torch_function( + lobpcg, + tensor_ops, + A, + k=k, + B=B, + X=X, + n=n, + iK=iK, + niter=niter, + tol=tol, + largest=largest, + method=method, + tracker=tracker, + ortho_iparams=ortho_iparams, + ortho_fparams=ortho_fparams, + ortho_bparams=ortho_bparams, + ) + + if not torch._jit_internal.is_scripting(): + if A.requires_grad or (B is not None and B.requires_grad): + # While it is expected that `A` is symmetric, + # the `A_grad` might be not. Therefore we perform the trick below, + # so that `A_grad` becomes symmetric. + # The symmetrization is important for first-order optimization methods, + # so that (A - alpha * A_grad) is still a symmetric matrix. + # Same holds for `B`. + A_sym = (A + A.mT) / 2 + B_sym = (B + B.mT) / 2 if (B is not None) else None + + return LOBPCGAutogradFunction.apply( + A_sym, + k, + B_sym, + X, + n, + iK, + niter, + tol, + largest, + method, + tracker, + ortho_iparams, + ortho_fparams, + ortho_bparams, + ) + else: + if A.requires_grad or (B is not None and B.requires_grad): + raise RuntimeError( + "Script and require grads is not supported atm." + "If you just want to do the forward, use .detach()" + "on A and B before calling into lobpcg" + ) + + return _lobpcg( + A, + k, + B, + X, + n, + iK, + niter, + tol, + largest, + method, + tracker, + ortho_iparams, + ortho_fparams, + ortho_bparams, + ) + + +def _lobpcg( + A: Tensor, + k: Optional[int] = None, + B: Optional[Tensor] = None, + X: Optional[Tensor] = None, + n: Optional[int] = None, + iK: Optional[Tensor] = None, + niter: Optional[int] = None, + tol: Optional[float] = None, + largest: Optional[bool] = None, + method: Optional[str] = None, + tracker: None = None, + ortho_iparams: Optional[Dict[str, int]] = None, + ortho_fparams: Optional[Dict[str, float]] = None, + ortho_bparams: Optional[Dict[str, bool]] = None, +) -> Tuple[Tensor, Tensor]: + # A must be square: + assert A.shape[-2] == A.shape[-1], A.shape + if B is not None: + # A and B must have the same shapes: + assert A.shape == B.shape, (A.shape, B.shape) + + dtype = _utils.get_floating_dtype(A) + device = A.device + if tol is None: + feps = {torch.float32: 1.2e-07, torch.float64: 2.23e-16}[dtype] + tol = feps**0.5 + + m = A.shape[-1] + k = (1 if X is None else X.shape[-1]) if k is None else k + n = (k if n is None else n) if X is None else X.shape[-1] + + if m < 3 * n: + raise ValueError( + f"LPBPCG algorithm is not applicable when the number of A rows (={m})" + f" is smaller than 3 x the number of requested eigenpairs (={n})" + ) + + method = "ortho" if method is None else method + + iparams = { + "m": m, + "n": n, + "k": k, + "niter": 1000 if niter is None else niter, + } + + fparams = { + "tol": tol, + } + + bparams = {"largest": True if largest is None else largest} + + if method == "ortho": + if ortho_iparams is not None: + iparams.update(ortho_iparams) + if ortho_fparams is not None: + fparams.update(ortho_fparams) + if 
ortho_bparams is not None: + bparams.update(ortho_bparams) + iparams["ortho_i_max"] = iparams.get("ortho_i_max", 3) + iparams["ortho_j_max"] = iparams.get("ortho_j_max", 3) + fparams["ortho_tol"] = fparams.get("ortho_tol", tol) + fparams["ortho_tol_drop"] = fparams.get("ortho_tol_drop", tol) + fparams["ortho_tol_replace"] = fparams.get("ortho_tol_replace", tol) + bparams["ortho_use_drop"] = bparams.get("ortho_use_drop", False) + + if not torch.jit.is_scripting(): + LOBPCG.call_tracker = LOBPCG_call_tracker # type: ignore[method-assign] + + if len(A.shape) > 2: + N = int(torch.prod(torch.tensor(A.shape[:-2]))) + bA = A.reshape((N,) + A.shape[-2:]) + bB = B.reshape((N,) + A.shape[-2:]) if B is not None else None + bX = X.reshape((N,) + X.shape[-2:]) if X is not None else None + bE = torch.empty((N, k), dtype=dtype, device=device) + bXret = torch.empty((N, m, k), dtype=dtype, device=device) + + for i in range(N): + A_ = bA[i] + B_ = bB[i] if bB is not None else None + X_ = ( + torch.randn((m, n), dtype=dtype, device=device) if bX is None else bX[i] + ) + assert len(X_.shape) == 2 and X_.shape == (m, n), (X_.shape, (m, n)) + iparams["batch_index"] = i + worker = LOBPCG(A_, B_, X_, iK, iparams, fparams, bparams, method, tracker) + worker.run() + bE[i] = worker.E[:k] + bXret[i] = worker.X[:, :k] + + if not torch.jit.is_scripting(): + LOBPCG.call_tracker = LOBPCG_call_tracker_orig # type: ignore[method-assign] + + return bE.reshape(A.shape[:-2] + (k,)), bXret.reshape(A.shape[:-2] + (m, k)) + + X = torch.randn((m, n), dtype=dtype, device=device) if X is None else X + assert len(X.shape) == 2 and X.shape == (m, n), (X.shape, (m, n)) + + worker = LOBPCG(A, B, X, iK, iparams, fparams, bparams, method, tracker) + + worker.run() + + if not torch.jit.is_scripting(): + LOBPCG.call_tracker = LOBPCG_call_tracker_orig # type: ignore[method-assign] + + return worker.E[:k], worker.X[:, :k] + + +class LOBPCG: + """Worker class of LOBPCG methods.""" + + def __init__( + self, + A: Optional[Tensor], + B: Optional[Tensor], + X: Tensor, + iK: Optional[Tensor], + iparams: Dict[str, int], + fparams: Dict[str, float], + bparams: Dict[str, bool], + method: str, + tracker: None, + ) -> None: + # constant parameters + self.A = A + self.B = B + self.iK = iK + self.iparams = iparams + self.fparams = fparams + self.bparams = bparams + self.method = method + self.tracker = tracker + m = iparams["m"] + n = iparams["n"] + + # variable parameters + self.X = X + self.E = torch.zeros((n,), dtype=X.dtype, device=X.device) + self.R = torch.zeros((m, n), dtype=X.dtype, device=X.device) + self.S = torch.zeros((m, 3 * n), dtype=X.dtype, device=X.device) + self.tvars: Dict[str, Tensor] = {} + self.ivars: Dict[str, int] = {"istep": 0} + self.fvars: Dict[str, float] = {"_": 0.0} + self.bvars: Dict[str, bool] = {"_": False} + + def __str__(self): + lines = ["LOPBCG:"] + lines += [f" iparams={self.iparams}"] + lines += [f" fparams={self.fparams}"] + lines += [f" bparams={self.bparams}"] + lines += [f" ivars={self.ivars}"] + lines += [f" fvars={self.fvars}"] + lines += [f" bvars={self.bvars}"] + lines += [f" tvars={self.tvars}"] + lines += [f" A={self.A}"] + lines += [f" B={self.B}"] + lines += [f" iK={self.iK}"] + lines += [f" X={self.X}"] + lines += [f" E={self.E}"] + r = "" + for line in lines: + r += line + "\n" + return r + + def update(self): + """Set and update iteration variables.""" + if self.ivars["istep"] == 0: + X_norm = float(torch.norm(self.X)) + iX_norm = X_norm**-1 + A_norm = float(torch.norm(_utils.matmul(self.A, self.X))) 
* iX_norm + B_norm = float(torch.norm(_utils.matmul(self.B, self.X))) * iX_norm + self.fvars["X_norm"] = X_norm + self.fvars["A_norm"] = A_norm + self.fvars["B_norm"] = B_norm + self.ivars["iterations_left"] = self.iparams["niter"] + self.ivars["converged_count"] = 0 + self.ivars["converged_end"] = 0 + + if self.method == "ortho": + self._update_ortho() + else: + self._update_basic() + + self.ivars["iterations_left"] = self.ivars["iterations_left"] - 1 + self.ivars["istep"] = self.ivars["istep"] + 1 + + def update_residual(self): + """Update residual R from A, B, X, E.""" + mm = _utils.matmul + self.R = mm(self.A, self.X) - mm(self.B, self.X) * self.E + + def update_converged_count(self): + """Determine the number of converged eigenpairs using backward stable + convergence criterion, see discussion in Sec 4.3 of [DuerschEtal2018]. + + Users may redefine this method for custom convergence criteria. + """ + # (...) -> int + prev_count = self.ivars["converged_count"] + tol = self.fparams["tol"] + A_norm = self.fvars["A_norm"] + B_norm = self.fvars["B_norm"] + E, X, R = self.E, self.X, self.R + rerr = ( + torch.norm(R, 2, (0,)) + * (torch.norm(X, 2, (0,)) * (A_norm + E[: X.shape[-1]] * B_norm)) ** -1 + ) + converged = rerr < tol + count = 0 + for b in converged: + if not b: + # ignore convergence of following pairs to ensure + # strict ordering of eigenpairs + break + count += 1 + assert ( + count >= prev_count + ), f"the number of converged eigenpairs (was {prev_count}, got {count}) cannot decrease" + self.ivars["converged_count"] = count + self.tvars["rerr"] = rerr + return count + + def stop_iteration(self): + """Return True to stop iterations. + + Note that tracker (if defined) can force-stop iterations by + setting ``worker.bvars['force_stop'] = True``. + """ + return ( + self.bvars.get("force_stop", False) + or self.ivars["iterations_left"] == 0 + or self.ivars["converged_count"] >= self.iparams["k"] + ) + + def run(self): + """Run LOBPCG iterations. + + Use this method as a template for implementing LOBPCG + iteration scheme with custom tracker that is compatible with + TorchScript. + """ + self.update() + + if not torch.jit.is_scripting() and self.tracker is not None: + self.call_tracker() + + while not self.stop_iteration(): + self.update() + + if not torch.jit.is_scripting() and self.tracker is not None: + self.call_tracker() + + @torch.jit.unused + def call_tracker(self): + """Interface for tracking iteration process in Python mode. + + Tracking the iteration process is disabled in TorchScript + mode. In fact, one should specify tracker=None when JIT + compiling functions using lobpcg. + """ + # do nothing when in TorchScript mode + pass + + # Internal methods + + def _update_basic(self): + """ + Update or initialize iteration variables when `method == "basic"`. 
+ """ + mm = torch.matmul + ns = self.ivars["converged_end"] + nc = self.ivars["converged_count"] + n = self.iparams["n"] + largest = self.bparams["largest"] + + if self.ivars["istep"] == 0: + Ri = self._get_rayleigh_ritz_transform(self.X) + M = _utils.qform(_utils.qform(self.A, self.X), Ri) + E, Z = _utils.symeig(M, largest) + self.X[:] = mm(self.X, mm(Ri, Z)) + self.E[:] = E + np = 0 + self.update_residual() + nc = self.update_converged_count() + self.S[..., :n] = self.X + + W = _utils.matmul(self.iK, self.R) + self.ivars["converged_end"] = ns = n + np + W.shape[-1] + self.S[:, n + np : ns] = W + else: + S_ = self.S[:, nc:ns] + Ri = self._get_rayleigh_ritz_transform(S_) + M = _utils.qform(_utils.qform(self.A, S_), Ri) + E_, Z = _utils.symeig(M, largest) + self.X[:, nc:] = mm(S_, mm(Ri, Z[:, : n - nc])) + self.E[nc:] = E_[: n - nc] + P = mm(S_, mm(Ri, Z[:, n : 2 * n - nc])) + np = P.shape[-1] + + self.update_residual() + nc = self.update_converged_count() + self.S[..., :n] = self.X + self.S[:, n : n + np] = P + W = _utils.matmul(self.iK, self.R[:, nc:]) + + self.ivars["converged_end"] = ns = n + np + W.shape[-1] + self.S[:, n + np : ns] = W + + def _update_ortho(self): + """ + Update or initialize iteration variables when `method == "ortho"`. + """ + mm = torch.matmul + ns = self.ivars["converged_end"] + nc = self.ivars["converged_count"] + n = self.iparams["n"] + largest = self.bparams["largest"] + + if self.ivars["istep"] == 0: + Ri = self._get_rayleigh_ritz_transform(self.X) + M = _utils.qform(_utils.qform(self.A, self.X), Ri) + E, Z = _utils.symeig(M, largest) + self.X = mm(self.X, mm(Ri, Z)) + self.update_residual() + np = 0 + nc = self.update_converged_count() + self.S[:, :n] = self.X + W = self._get_ortho(self.R, self.X) + ns = self.ivars["converged_end"] = n + np + W.shape[-1] + self.S[:, n + np : ns] = W + + else: + S_ = self.S[:, nc:ns] + # Rayleigh-Ritz procedure + E_, Z = _utils.symeig(_utils.qform(self.A, S_), largest) + + # Update E, X, P + self.X[:, nc:] = mm(S_, Z[:, : n - nc]) + self.E[nc:] = E_[: n - nc] + P = mm( + S_, + mm( + Z[:, n - nc :], + _utils.basis(_utils.transpose(Z[: n - nc, n - nc :])), + ), + ) + np = P.shape[-1] + + # check convergence + self.update_residual() + nc = self.update_converged_count() + + # update S + self.S[:, :n] = self.X + self.S[:, n : n + np] = P + W = self._get_ortho(self.R[:, nc:], self.S[:, : n + np]) + ns = self.ivars["converged_end"] = n + np + W.shape[-1] + self.S[:, n + np : ns] = W + + def _get_rayleigh_ritz_transform(self, S): + """Return a transformation matrix that is used in Rayleigh-Ritz + procedure for reducing a general eigenvalue problem :math:`(S^TAS) + C = (S^TBS) C E` to a standard eigenvalue problem :math: `(Ri^T + S^TAS Ri) Z = Z E` where `C = Ri Z`. + + .. 
note:: In the original Rayleight-Ritz procedure in + [DuerschEtal2018], the problem is formulated as follows:: + + SAS = S^T A S + SBS = S^T B S + D = () ** -1/2 + R^T R = Cholesky(D SBS D) + Ri = D R^-1 + solve symeig problem Ri^T SAS Ri Z = Theta Z + C = Ri Z + + To reduce the number of matrix products (denoted by empty + space between matrices), here we introduce element-wise + products (denoted by symbol `*`) so that the Rayleight-Ritz + procedure becomes:: + + SAS = S^T A S + SBS = S^T B S + d = () ** -1/2 # this is 1-d column vector + dd = d d^T # this is 2-d matrix + R^T R = Cholesky(dd * SBS) + Ri = R^-1 * d # broadcasting + solve symeig problem Ri^T SAS Ri Z = Theta Z + C = Ri Z + + where `dd` is 2-d matrix that replaces matrix products `D M + D` with one element-wise product `M * dd`; and `d` replaces + matrix product `D M` with element-wise product `M * + d`. Also, creating the diagonal matrix `D` is avoided. + + Args: + S (Tensor): the matrix basis for the search subspace, size is + :math:`(m, n)`. + + Returns: + Ri (tensor): upper-triangular transformation matrix of size + :math:`(n, n)`. + + """ + B = self.B + mm = torch.matmul + SBS = _utils.qform(B, S) + d_row = SBS.diagonal(0, -2, -1) ** -0.5 + d_col = d_row.reshape(d_row.shape[0], 1) + # TODO use torch.linalg.cholesky_solve once it is implemented + R = torch.linalg.cholesky((SBS * d_row) * d_col, upper=True) + return torch.linalg.solve_triangular( + R, d_row.diag_embed(), upper=True, left=False + ) + + def _get_svqb( + self, U: Tensor, drop: bool, tau: float # Tensor # bool # float + ) -> Tensor: + """Return B-orthonormal U. + + .. note:: When `drop` is `False` then `svqb` is based on the + Algorithm 4 from [DuerschPhD2015] that is a slight + modification of the corresponding algorithm + introduced in [StathopolousWu2002]. + + Args: + + U (Tensor) : initial approximation, size is (m, n) + drop (bool) : when True, drop columns that + contribution to the `span([U])` is small. + tau (float) : positive tolerance + + Returns: + + U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`), size + is (m, n1), where `n1 = n` if `drop` is `False, + otherwise `n1 <= n`. + + """ + if torch.numel(U) == 0: + return U + UBU = _utils.qform(self.B, U) + d = UBU.diagonal(0, -2, -1) + + # Detect and drop exact zero columns from U. While the test + # `abs(d) == 0` is unlikely to be True for random data, it is + # possible to construct input data to lobpcg where it will be + # True leading to a failure (notice the `d ** -0.5` operation + # in the original algorithm). To prevent the failure, we drop + # the exact zero columns here and then continue with the + # original algorithm below. + nz = torch.where(abs(d) != 0.0) + assert len(nz) == 1, nz + if len(nz[0]) < len(d): + U = U[:, nz[0]] + if torch.numel(U) == 0: + return U + UBU = _utils.qform(self.B, U) + d = UBU.diagonal(0, -2, -1) + nz = torch.where(abs(d) != 0.0) + assert len(nz[0]) == len(d) + + # The original algorithm 4 from [DuerschPhD2015]. + d_col = (d**-0.5).reshape(d.shape[0], 1) + DUBUD = (UBU * d_col) * _utils.transpose(d_col) + E, Z = _utils.symeig(DUBUD) + t = tau * abs(E).max() + if drop: + keep = torch.where(E > t) + assert len(keep) == 1, keep + E = E[keep[0]] + Z = Z[:, keep[0]] + d_col = d_col[keep[0]] + else: + E[(torch.where(E < t))[0]] = t + + return torch.matmul(U * _utils.transpose(d_col), Z * E**-0.5) + + def _get_ortho(self, U, V): + """Return B-orthonormal U with columns are B-orthogonal to V. + + .. 
note:: When `bparams["ortho_use_drop"] == False` then + `_get_ortho` is based on the Algorithm 3 from + [DuerschPhD2015] that is a slight modification of + the corresponding algorithm introduced in + [StathopolousWu2002]. Otherwise, the method + implements Algorithm 6 from [DuerschPhD2015] + + .. note:: If all U columns are B-collinear to V then the + returned tensor U will be empty. + + Args: + + U (Tensor) : initial approximation, size is (m, n) + V (Tensor) : B-orthogonal external basis, size is (m, k) + + Returns: + + U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`) + such that :math:`V^T B U=0`, size is (m, n1), + where `n1 = n` if `drop` is `False, otherwise + `n1 <= n`. + """ + mm = torch.matmul + mm_B = _utils.matmul + m = self.iparams["m"] + tau_ortho = self.fparams["ortho_tol"] + tau_drop = self.fparams["ortho_tol_drop"] + tau_replace = self.fparams["ortho_tol_replace"] + i_max = self.iparams["ortho_i_max"] + j_max = self.iparams["ortho_j_max"] + # when use_drop==True, enable dropping U columns that have + # small contribution to the `span([U, V])`. + use_drop = self.bparams["ortho_use_drop"] + + # clean up variables from the previous call + for vkey in list(self.fvars.keys()): + if vkey.startswith("ortho_") and vkey.endswith("_rerr"): + self.fvars.pop(vkey) + self.ivars.pop("ortho_i", 0) + self.ivars.pop("ortho_j", 0) + + BV_norm = torch.norm(mm_B(self.B, V)) + BU = mm_B(self.B, U) + VBU = mm(_utils.transpose(V), BU) + i = j = 0 + stats = "" + for i in range(i_max): + U = U - mm(V, VBU) + drop = False + tau_svqb = tau_drop + for j in range(j_max): + if use_drop: + U = self._get_svqb(U, drop, tau_svqb) + drop = True + tau_svqb = tau_replace + else: + U = self._get_svqb(U, False, tau_replace) + if torch.numel(U) == 0: + # all initial U columns are B-collinear to V + self.ivars["ortho_i"] = i + self.ivars["ortho_j"] = j + return U + BU = mm_B(self.B, U) + UBU = mm(_utils.transpose(U), BU) + U_norm = torch.norm(U) + BU_norm = torch.norm(BU) + R = UBU - torch.eye(UBU.shape[-1], device=UBU.device, dtype=UBU.dtype) + R_norm = torch.norm(R) + # https://github.com/pytorch/pytorch/issues/33810 workaround: + rerr = float(R_norm) * float(BU_norm * U_norm) ** -1 + vkey = f"ortho_UBUmI_rerr[{i}, {j}]" + self.fvars[vkey] = rerr + if rerr < tau_ortho: + break + VBU = mm(_utils.transpose(V), BU) + VBU_norm = torch.norm(VBU) + U_norm = torch.norm(U) + rerr = float(VBU_norm) * float(BV_norm * U_norm) ** -1 + vkey = f"ortho_VBU_rerr[{i}]" + self.fvars[vkey] = rerr + if rerr < tau_ortho: + break + if m < U.shape[-1] + V.shape[-1]: + # TorchScript needs the class var to be assigned to a local to + # do optional type refinement + B = self.B + assert B is not None + raise ValueError( + "Overdetermined shape of U:" + f" #B-cols(={B.shape[-1]}) >= #U-cols(={U.shape[-1]}) + #V-cols(={V.shape[-1]}) must hold" + ) + self.ivars["ortho_i"] = i + self.ivars["ortho_j"] = j + return U + + +# Calling tracker is separated from LOBPCG definitions because +# TorchScript does not support user-defined callback arguments: +LOBPCG_call_tracker_orig = LOBPCG.call_tracker + + +def LOBPCG_call_tracker(self): + self.tracker(self) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_lowrank.py b/llmeval-env/lib/python3.10/site-packages/torch/_lowrank.py new file mode 100644 index 0000000000000000000000000000000000000000..fe5a1f3da71d0f5be7c48a4b7cc31fad85f4147e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_lowrank.py @@ -0,0 +1,298 @@ +"""Implement various linear algebra algorithms for 
low rank matrices. +""" + +__all__ = ["svd_lowrank", "pca_lowrank"] + +from typing import Optional, Tuple + +import torch +from torch import Tensor +from . import _linalg_utils as _utils +from .overrides import handle_torch_function, has_torch_function + + +def get_approximate_basis( + A: Tensor, q: int, niter: Optional[int] = 2, M: Optional[Tensor] = None +) -> Tensor: + """Return tensor :math:`Q` with :math:`q` orthonormal columns such + that :math:`Q Q^H A` approximates :math:`A`. If :math:`M` is + specified, then :math:`Q` is such that :math:`Q Q^H (A - M)` + approximates :math:`A - M`. + + .. note:: The implementation is based on the Algorithm 4.4 from + Halko et al, 2009. + + .. note:: For an adequate approximation of a k-rank matrix + :math:`A`, where k is not known in advance but could be + estimated, the number of :math:`Q` columns, q, can be + choosen according to the following criteria: in general, + :math:`k <= q <= min(2*k, m, n)`. For large low-rank + matrices, take :math:`q = k + 5..10`. If k is + relatively small compared to :math:`min(m, n)`, choosing + :math:`q = k + 0..2` may be sufficient. + + .. note:: To obtain repeatable results, reset the seed for the + pseudorandom number generator + + Args:: + A (Tensor): the input tensor of size :math:`(*, m, n)` + + q (int): the dimension of subspace spanned by :math:`Q` + columns. + + niter (int, optional): the number of subspace iterations to + conduct; ``niter`` must be a + nonnegative integer. In most cases, the + default value 2 is more than enough. + + M (Tensor, optional): the input tensor's mean of size + :math:`(*, 1, n)`. + + References:: + - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding + structure with randomness: probabilistic algorithms for + constructing approximate matrix decompositions, + arXiv:0909.4061 [math.NA; math.PR], 2009 (available at + `arXiv `_). + """ + + niter = 2 if niter is None else niter + m, n = A.shape[-2:] + dtype = _utils.get_floating_dtype(A) + matmul = _utils.matmul + + R = torch.randn(n, q, dtype=dtype, device=A.device) + + # The following code could be made faster using torch.geqrf + torch.ormqr + # but geqrf is not differentiable + A_H = _utils.transjugate(A) + if M is None: + Q = torch.linalg.qr(matmul(A, R)).Q + for i in range(niter): + Q = torch.linalg.qr(matmul(A_H, Q)).Q + Q = torch.linalg.qr(matmul(A, Q)).Q + else: + M_H = _utils.transjugate(M) + Q = torch.linalg.qr(matmul(A, R) - matmul(M, R)).Q + for i in range(niter): + Q = torch.linalg.qr(matmul(A_H, Q) - matmul(M_H, Q)).Q + Q = torch.linalg.qr(matmul(A, Q) - matmul(M, Q)).Q + + return Q + + +def svd_lowrank( + A: Tensor, + q: Optional[int] = 6, + niter: Optional[int] = 2, + M: Optional[Tensor] = None, +) -> Tuple[Tensor, Tensor, Tensor]: + r"""Return the singular value decomposition ``(U, S, V)`` of a matrix, + batches of matrices, or a sparse matrix :math:`A` such that + :math:`A \approx U diag(S) V^T`. In case :math:`M` is given, then + SVD is computed for the matrix :math:`A - M`. + + .. note:: The implementation is based on the Algorithm 5.1 from + Halko et al, 2009. + + .. note:: To obtain repeatable results, reset the seed for the + pseudorandom number generator + + .. note:: The input is assumed to be a low-rank matrix. + + .. note:: In general, use the full-rank SVD implementation + :func:`torch.linalg.svd` for dense matrices due to its 10-fold + higher performance characteristics. The low-rank SVD + will be useful for huge sparse matrices that + :func:`torch.linalg.svd` cannot handle. 
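+
+    Example (an illustrative sketch; a small dense matrix and an arbitrary rank
+    estimate are used here purely for demonstration)::
+
+        >>> A = torch.randn(100, 20)
+        >>> U, S, V = torch.svd_lowrank(A, q=6)
+        >>> U.shape, S.shape, V.shape
+        (torch.Size([100, 6]), torch.Size([6]), torch.Size([20, 6]))
+        >>> A_approx = U @ torch.diag(S) @ V.mT  # rank-q approximation of A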
+ + Args:: + A (Tensor): the input tensor of size :math:`(*, m, n)` + + q (int, optional): a slightly overestimated rank of A. + + niter (int, optional): the number of subspace iterations to + conduct; niter must be a nonnegative + integer, and defaults to 2 + + M (Tensor, optional): the input tensor's mean of size + :math:`(*, 1, n)`. + + References:: + - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding + structure with randomness: probabilistic algorithms for + constructing approximate matrix decompositions, + arXiv:0909.4061 [math.NA; math.PR], 2009 (available at + `arXiv `_). + + """ + if not torch.jit.is_scripting(): + tensor_ops = (A, M) + if not set(map(type, tensor_ops)).issubset( + (torch.Tensor, type(None)) + ) and has_torch_function(tensor_ops): + return handle_torch_function( + svd_lowrank, tensor_ops, A, q=q, niter=niter, M=M + ) + return _svd_lowrank(A, q=q, niter=niter, M=M) + + +def _svd_lowrank( + A: Tensor, + q: Optional[int] = 6, + niter: Optional[int] = 2, + M: Optional[Tensor] = None, +) -> Tuple[Tensor, Tensor, Tensor]: + q = 6 if q is None else q + m, n = A.shape[-2:] + matmul = _utils.matmul + if M is None: + M_t = None + else: + M_t = _utils.transpose(M) + A_t = _utils.transpose(A) + + # Algorithm 5.1 in Halko et al 2009, slightly modified to reduce + # the number conjugate and transpose operations + if m < n or n > q: + # computing the SVD approximation of a transpose in + # order to keep B shape minimal (the m < n case) or the V + # shape small (the n > q case) + Q = get_approximate_basis(A_t, q, niter=niter, M=M_t) + Q_c = _utils.conjugate(Q) + if M is None: + B_t = matmul(A, Q_c) + else: + B_t = matmul(A, Q_c) - matmul(M, Q_c) + assert B_t.shape[-2] == m, (B_t.shape, m) + assert B_t.shape[-1] == q, (B_t.shape, q) + assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape + U, S, Vh = torch.linalg.svd(B_t, full_matrices=False) + V = Vh.mH + V = Q.matmul(V) + else: + Q = get_approximate_basis(A, q, niter=niter, M=M) + Q_c = _utils.conjugate(Q) + if M is None: + B = matmul(A_t, Q_c) + else: + B = matmul(A_t, Q_c) - matmul(M_t, Q_c) + B_t = _utils.transpose(B) + assert B_t.shape[-2] == q, (B_t.shape, q) + assert B_t.shape[-1] == n, (B_t.shape, n) + assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape + U, S, Vh = torch.linalg.svd(B_t, full_matrices=False) + V = Vh.mH + U = Q.matmul(U) + + return U, S, V + + +def pca_lowrank( + A: Tensor, q: Optional[int] = None, center: bool = True, niter: int = 2 +) -> Tuple[Tensor, Tensor, Tensor]: + r"""Performs linear Principal Component Analysis (PCA) on a low-rank + matrix, batches of such matrices, or sparse matrix. + + This function returns a namedtuple ``(U, S, V)`` which is the + nearly optimal approximation of a singular value decomposition of + a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`. + + .. note:: The relation of ``(U, S, V)`` to PCA is as follows: + + - :math:`A` is a data matrix with ``m`` samples and + ``n`` features + + - the :math:`V` columns represent the principal directions + + - :math:`S ** 2 / (m - 1)` contains the eigenvalues of + :math:`A^T A / (m - 1)` which is the covariance of + ``A`` when ``center=True`` is provided. + + - ``matmul(A, V[:, :k])`` projects data to the first k + principal components + + .. note:: Different from the standard SVD, the size of returned + matrices depend on the specified rank and q + values as follows: + + - :math:`U` is m x q matrix + + - :math:`S` is q-vector + + - :math:`V` is n x q matrix + + .. 
note:: To obtain repeatable results, reset the seed for the + pseudorandom number generator + + Args: + + A (Tensor): the input tensor of size :math:`(*, m, n)` + + q (int, optional): a slightly overestimated rank of + :math:`A`. By default, ``q = min(6, m, + n)``. + + center (bool, optional): if True, center the input tensor, + otherwise, assume that the input is + centered. + + niter (int, optional): the number of subspace iterations to + conduct; niter must be a nonnegative + integer, and defaults to 2. + + References:: + + - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding + structure with randomness: probabilistic algorithms for + constructing approximate matrix decompositions, + arXiv:0909.4061 [math.NA; math.PR], 2009 (available at + `arXiv `_). + + """ + + if not torch.jit.is_scripting(): + if type(A) is not torch.Tensor and has_torch_function((A,)): + return handle_torch_function( + pca_lowrank, (A,), A, q=q, center=center, niter=niter + ) + + (m, n) = A.shape[-2:] + + if q is None: + q = min(6, m, n) + elif not (q >= 0 and q <= min(m, n)): + raise ValueError( + f"q(={q}) must be non-negative integer and not greater than min(m, n)={min(m, n)}" + ) + if not (niter >= 0): + raise ValueError(f"niter(={niter}) must be non-negative integer") + + dtype = _utils.get_floating_dtype(A) + + if not center: + return _svd_lowrank(A, q, niter=niter, M=None) + + if _utils.is_sparse(A): + if len(A.shape) != 2: + raise ValueError("pca_lowrank input is expected to be 2-dimensional tensor") + c = torch.sparse.sum(A, dim=(-2,)) / m + # reshape c + column_indices = c.indices()[0] + indices = torch.zeros( + 2, + len(column_indices), + dtype=column_indices.dtype, + device=column_indices.device, + ) + indices[0] = column_indices + C_t = torch.sparse_coo_tensor( + indices, c.values(), (n, 1), dtype=dtype, device=A.device + ) + + ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device) + M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t)) + return _svd_lowrank(A, q, niter=niter, M=M) + else: + C = A.mean(dim=(-2,), keepdim=True) + return _svd_lowrank(A - C, q, niter=niter, M=None) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_python_dispatcher.py b/llmeval-env/lib/python3.10/site-packages/torch/_python_dispatcher.py new file mode 100644 index 0000000000000000000000000000000000000000..bfd208eddb9e8c05938d149127c37df0249d9fd3 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_python_dispatcher.py @@ -0,0 +1,181 @@ +import re + +import torch._C as C + + +""" +PythonDispatcher class is a thin python-binding to C++ dispatcher and it +is designed to show how dispatcher precompute works. In particular, +it shows for a certain op `foo`, what the computed dispatch table looks +like after user register their kernels to certains dispatch keys. + +In the real C++ dispatcher we support many dispatch keys for different +functionalities. For simplicity PythonDispatcher only supports dispatch +keys for a single example of each use case. These use cases are listed below: + +- CPU/AutogradCPU: represents in-tree backends which we usually have dedicated inference & + autograd kernel in pytorch core library. + E.g. CPU, CUDA +- FPGA/AutogradOther: represents in-tree backends which we usually have backend specific + inference kernels, but they share the same autograd kernel specified in AutogradOther. + E.g. 
FPGA, SparseCsrCPU +- XLA/AutogradXLA: represents out-of-tree backends which we don't have either inference or autograd + kernel defined in pytorch core library. Backend owner is responsible for registering both + inference & autograd kernels in their extensions(e.g. torch-xla) for the operators they support. + E.g. XLA, XPU, MPS +- CompositeExplicitAutograd: alias key mapped to inference kernels of all backends like CPU, CUDA, XLA etc. + Kernels registered to this key MUST work for inference for all backends. +- Autograd: alias key mapped to autograd of all backends like AutogradCPU, AutogradXLA, AutogradOther. + Kernels registered to this key MUST work for autograd for all backends. +- CompositeImplicitAutograd: alias key CompositeImplicitAutograd = CompositeExplicitAutograd + Autograd + Kernels registered to this key MUST work for both inference + autograd for all backends. + +Note we only allow registrations to alias keys inside pytorch core library. E.g +you shouldn't register a CompositeImplicitAutograd or CompositeExplicitAutograd +kernel from torch-xla extension, instead you should upstream the kernel into +pytorch/pytorch repo so that it's available for all backends and continuously +tested even without the extension. + +Usage: + dispatcher = PythonDispatcher() + dispatcher.register(["CPU", "XLA", "CompositeImplicitAutograd"]) + print(dispatcher.dispatchTable()) # This tells you exactly which kernel is used for certain backend. + # For more debugging information + # print(dispatcher.keys()) + # print(dispatcher.registrations()) + # print(dispatcher.rawRegistrations()) + # print(dispatcher.rawDispatchTable()) +PythonDispatcher calls C++ dispatcher under the hood for to precompute dispatch table. +This file only provides the simplified API for developers, relevant test code is located in +test/test_dispatch.py +""" + + +class PythonDispatcher: + namespace = "__test__" + name = "foo" + # fmt: off + runtime_keys = [ + "CPU", "AutogradCPU", + "FPGA", "AutogradOther", + "XLA", "AutogradXLA", + "Lazy", "AutogradLazy", + ] + # fmt: on + alias_keys = [ + "CompositeExplicitAutograd", + "Autograd", + "CompositeImplicitAutograd", + ] + supported_keys = runtime_keys + alias_keys + + def __init__(self): + C._dispatch_check_invariants(self.name) # type: ignore[attr-defined] + self.ref = C._dispatch_library("FRAGMENT", self.namespace, "") + self.ref.def_("foo(Tensor x) -> Tensor") + + """ + Returns a list of dispatch keys supported by PythonDispatcher. + You can register kernels to these keys. + """ + + def keys(self): + return self.supported_keys + + """ + Register kernels to the target dispatchKeys. + dispatchKeys(list[str]): a list of dispatch keys that you want to register + your own kernel. Note that you don't need to write the kernel yourself in + this PythonDispatcher.E.g. for CPU key, a kernel(e.g fn_CPU for CPU) is + automatically generated and registered. + """ + + def register(self, dispatchKeys): + # Overriden is not supported and triggers a warning in C++ dispatcher. + if len(set(dispatchKeys)) != len(dispatchKeys): + raise RuntimeError( + f"Overriden is not allowed but found duplicates in {dispatchKeys}." + ) + # We currently forbid this in codegen instead of C++ dispatcher. + if ( + "CompositeImplicitAutograd" in dispatchKeys + and "CompositeExplicitAutograd" in dispatchKeys + ): + raise RuntimeError( + "Registration to both CompositeImplicitAutograd and CompositeExplicitAutograd is not allowed." 
+ ) + for key in dispatchKeys: + if key not in self.supported_keys: + raise RuntimeError( + f"{key} is not supported, please select a dispatch key in {self.supported_keys}." + ) + self.ref.impl_t_t("foo", dispatch=key, debug="fn_" + key) + + """ + Helper function to format (key, kernel). + """ + + def _format_line(self, key, kernel): + return f"{key:<15} {kernel}\n" + + """ + Helper function to print a table header. + """ + + def _format_header(self, header): + s = f""" +{header} +""" + s += self._format_line("key", "kernel") + s += "---------------------------\n" + return s + + """ + Returns raw output of all registration info for debugging only. + Use registrations() for a simplified version. + """ + + def rawRegistrations(self): + return C._dispatch_dump(f"{self.namespace}::{self.name}") # type: ignore[attr-defined] + + """ + Returns raw output of computed dispatch table for debugging only. + Use dispatchTable() for a simplified version. + """ + + def rawDispatchTable(self): + return C._dispatch_dump_table(f"{self.namespace}::{self.name}") # type: ignore[attr-defined] + + """ + Returns a table(str) including all the registrations from users. + Note this includes registrations to both runtime keys and alias keys. + """ + + def registrations(self): + output = self._format_header("Registered Kernels") + state = self.rawRegistrations() + state_entries = state.split("\n") + for line in state_entries: + first = line.split(":")[0] + if any(first.startswith(k) for k in self.supported_keys): + kernel = line.split("::")[0].split(" ")[1] + output += self._format_line(first, kernel) + return output + + """ + Returns the computed dispatch table(str). Note this only include + runtime keys, registrations to alias keys have been decoded to their + mapped runtime keys. + """ + + def dispatchTable(self): + output = self._format_header("Computed Dispatch Table") + table = self.rawDispatchTable() + table_entries = table.split("\n") + regex = re.compile(r"registered at .*FallbackKernel\.cpp.*(\[)") + for line in table_entries: + k = line.split(":")[0] + if k in self.runtime_keys: + entry = regex.sub("[", line) + output += self._format_line(k, entry.split(": ")[1]) + return output diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_sources.py b/llmeval-env/lib/python3.10/site-packages/torch/_sources.py new file mode 100644 index 0000000000000000000000000000000000000000..3f56bd8ef2473aa9c35ad6232448c9d5d44b8056 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_sources.py @@ -0,0 +1,137 @@ +import ast +import functools +import inspect +from textwrap import dedent +from typing import Any, List, NamedTuple, Optional, Tuple + +from torch._C import ErrorReport +from torch._C._jit_tree_views import SourceRangeFactory + + +def get_source_lines_and_file( + obj: Any, + error_msg: Optional[str] = None, +) -> Tuple[List[str], int, Optional[str]]: + """ + Wrapper around inspect.getsourcelines and inspect.getsourcefile. + + Returns: (sourcelines, file_lino, filename) + """ + filename = None # in case getsourcefile throws + try: + filename = inspect.getsourcefile(obj) + sourcelines, file_lineno = inspect.getsourcelines(obj) + except OSError as e: + msg = ( + f"Can't get source for {obj}. TorchScript requires source access in " + "order to carry out compilation, make sure original .py files are " + "available." 
+ ) + if error_msg: + msg += "\n" + error_msg + raise OSError(msg) from e + + return sourcelines, file_lineno, filename + + +def normalize_source_lines(sourcelines: List[str]) -> List[str]: + """ + This helper function accepts a list of source lines. It finds the + indentation level of the function definition (`def`), then it indents + all lines in the function body to a point at or greater than that + level. This allows for comments and continued string literals that + are at a lower indentation than the rest of the code. + Args: + sourcelines: function source code, separated into lines by + the '\n' character + Returns: + A list of source lines that have been correctly aligned + """ + + def remove_prefix(text, prefix): + return text[text.startswith(prefix) and len(prefix) :] + + # Find the line and line number containing the function definition + idx = None + for i, l in enumerate(sourcelines): + if l.lstrip().startswith("def"): + idx = i + break + + # This will happen when the function is a lambda- we won't find "def" anywhere in the source + # lines in that case. Currently trying to JIT compile a lambda will throw an error up in + # `parse_def()`, but we might want to handle this case in the future. + if idx is None: + return sourcelines + + # Get a string representing the amount of leading whitespace + fn_def = sourcelines[idx] + whitespace = fn_def.split("def")[0] + + # Add this leading whitespace to all lines before and after the `def` + aligned_prefix = [ + whitespace + remove_prefix(s, whitespace) for s in sourcelines[:idx] + ] + aligned_suffix = [ + whitespace + remove_prefix(s, whitespace) for s in sourcelines[idx + 1 :] + ] + + # Put it together again + aligned_prefix.append(fn_def) + return aligned_prefix + aligned_suffix + + +# Thin wrapper around SourceRangeFactory to store extra metadata +# about the function-to-be-compiled. 
+class SourceContext(SourceRangeFactory): + def __init__( + self, + source, + filename, + file_lineno, + leading_whitespace_len, + uses_true_division=True, + funcname=None, + ): + super().__init__(source, filename, file_lineno, leading_whitespace_len) + self.uses_true_division = uses_true_division + self.filename = filename + self.funcname = funcname + + +@functools.lru_cache(maxsize=None) +def make_source_context(*args): + return SourceContext(*args) + + +def fake_range(): + return SourceContext("", None, 0, 0).make_raw_range(0, 1) + + +class ParsedDef(NamedTuple): + ast: ast.Module + ctx: SourceContext + source: str + filename: Optional[str] + file_lineno: int + + +def parse_def(fn): + sourcelines, file_lineno, filename = get_source_lines_and_file( + fn, ErrorReport.call_stack() + ) + sourcelines = normalize_source_lines(sourcelines) + source = "".join(sourcelines) + dedent_src = dedent(source) + py_ast = ast.parse(dedent_src) + if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef): + raise RuntimeError( + f"Expected a single top-level function: {filename}:{file_lineno}" + ) + leading_whitespace_len = len(source.split("\n", 1)[0]) - len( + dedent_src.split("\n", 1)[0] + ) + ctx = make_source_context( + source, filename, file_lineno, leading_whitespace_len, True, fn.__name__ + ) + return ParsedDef(py_ast, ctx, source, filename, file_lineno) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_streambase.py b/llmeval-env/lib/python3.10/site-packages/torch/_streambase.py new file mode 100644 index 0000000000000000000000000000000000000000..1d4737563ddb66259f5a365b193a45d4b9945ef6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_streambase.py @@ -0,0 +1,45 @@ +from abc import ABC, abstractmethod + + +class _StreamBase(ABC): + r"""Base stream class abstraction for multi backends Stream to herit from""" + + @abstractmethod + def wait_event(self, event): + raise NotImplementedError() + + @abstractmethod + def wait_stream(self, stream): + raise NotImplementedError() + + @abstractmethod + def record_event(self, event=None): + raise NotImplementedError() + + @abstractmethod + def query(self): + raise NotImplementedError() + + @abstractmethod + def synchronize(self): + raise NotImplementedError() + + @abstractmethod + def __eq__(self, stream): + raise NotImplementedError() + + +class _EventBase(ABC): + r"""Base Event class abstraction for multi backends Event to herit from""" + + @abstractmethod + def wait(self, stream=None): + raise NotImplementedError() + + @abstractmethod + def query(self): + raise NotImplementedError() + + @abstractmethod + def synchronize(self): + raise NotImplementedError() diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_tensor_docs.py b/llmeval-env/lib/python3.10/site-packages/torch/_tensor_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..2543177fdd4615f2afdea3ece5916639ff6dc0a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_tensor_docs.py @@ -0,0 +1,6976 @@ +"""Adds docstrings to Tensor functions""" + +import torch._C +from torch._C import _add_docstr as add_docstr +from torch._torch_docs import parse_kwargs, reproducibility_notes + + +def add_docstr_all(method, docstr): + add_docstr(getattr(torch._C.TensorBase, method), docstr) + + +common_args = parse_kwargs( + """ + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. 
+""" +) + +new_common_args = parse_kwargs( + """ + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + dtype (:class:`torch.dtype`, optional): the desired type of returned tensor. + Default: if None, same :class:`torch.dtype` as this tensor. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, same :class:`torch.device` as this tensor. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. +""" +) + +add_docstr_all( + "new_tensor", + """ +new_tensor(data, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \ +pin_memory=False) -> Tensor +""" + + r""" + +Returns a new Tensor with :attr:`data` as the tensor data. +By default, the returned Tensor has the same :class:`torch.dtype` and +:class:`torch.device` as this tensor. + +.. warning:: + + :func:`new_tensor` always copies :attr:`data`. If you have a Tensor + ``data`` and want to avoid a copy, use :func:`torch.Tensor.requires_grad_` + or :func:`torch.Tensor.detach`. + If you have a numpy array and want to avoid a copy, use + :func:`torch.from_numpy`. + +.. warning:: + + When data is a tensor `x`, :func:`new_tensor()` reads out 'the data' from whatever it is passed, + and constructs a leaf variable. Therefore ``tensor.new_tensor(x)`` is equivalent to ``x.clone().detach()`` + and ``tensor.new_tensor(x, requires_grad=True)`` is equivalent to ``x.clone().detach().requires_grad_(True)``. + The equivalents using ``clone()`` and ``detach()`` are recommended. + +Args: + data (array_like): The returned Tensor copies :attr:`data`. + +Keyword args: + {dtype} + {device} + {requires_grad} + {layout} + {pin_memory} + +Example:: + + >>> tensor = torch.ones((2,), dtype=torch.int8) + >>> data = [[0, 1], [2, 3]] + >>> tensor.new_tensor(data) + tensor([[ 0, 1], + [ 2, 3]], dtype=torch.int8) + +""".format( + **new_common_args + ), +) + +add_docstr_all( + "new_full", + """ +new_full(size, fill_value, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \ +pin_memory=False) -> Tensor +""" + + r""" + +Returns a Tensor of size :attr:`size` filled with :attr:`fill_value`. +By default, the returned Tensor has the same :class:`torch.dtype` and +:class:`torch.device` as this tensor. + +Args: + fill_value (scalar): the number to fill the output tensor with. + +Keyword args: + {dtype} + {device} + {requires_grad} + {layout} + {pin_memory} + +Example:: + + >>> tensor = torch.ones((2,), dtype=torch.float64) + >>> tensor.new_full((3, 4), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416, 3.1416]], dtype=torch.float64) + +""".format( + **new_common_args + ), +) + +add_docstr_all( + "new_empty", + """ +new_empty(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \ +pin_memory=False) -> Tensor +""" + + r""" + +Returns a Tensor of size :attr:`size` filled with uninitialized data. +By default, the returned Tensor has the same :class:`torch.dtype` and +:class:`torch.device` as this tensor. + +Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. 
+ +Keyword args: + {dtype} + {device} + {requires_grad} + {layout} + {pin_memory} + +Example:: + + >>> tensor = torch.ones(()) + >>> tensor.new_empty((2, 3)) + tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30], + [ 3.0949e-41, 4.4842e-44, 0.0000e+00]]) + +""".format( + **new_common_args + ), +) + +add_docstr_all( + "new_empty_strided", + """ +new_empty_strided(size, stride, dtype=None, device=None, requires_grad=False, layout=torch.strided, \ +pin_memory=False) -> Tensor +""" + + r""" + +Returns a Tensor of size :attr:`size` and strides :attr:`stride` filled with +uninitialized data. By default, the returned Tensor has the same +:class:`torch.dtype` and :class:`torch.device` as this tensor. + +Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + +Keyword args: + {dtype} + {device} + {requires_grad} + {layout} + {pin_memory} + +Example:: + + >>> tensor = torch.ones(()) + >>> tensor.new_empty_strided((2, 3), (3, 1)) + tensor([[ 5.8182e-18, 4.5765e-41, -1.0545e+30], + [ 3.0949e-41, 4.4842e-44, 0.0000e+00]]) + +""".format( + **new_common_args + ), +) + +add_docstr_all( + "new_ones", + """ +new_ones(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \ +pin_memory=False) -> Tensor +""" + + r""" + +Returns a Tensor of size :attr:`size` filled with ``1``. +By default, the returned Tensor has the same :class:`torch.dtype` and +:class:`torch.device` as this tensor. + +Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + +Keyword args: + {dtype} + {device} + {requires_grad} + {layout} + {pin_memory} + +Example:: + + >>> tensor = torch.tensor((), dtype=torch.int32) + >>> tensor.new_ones((2, 3)) + tensor([[ 1, 1, 1], + [ 1, 1, 1]], dtype=torch.int32) + +""".format( + **new_common_args + ), +) + +add_docstr_all( + "new_zeros", + """ +new_zeros(size, *, dtype=None, device=None, requires_grad=False, layout=torch.strided, \ +pin_memory=False) -> Tensor +""" + + r""" + +Returns a Tensor of size :attr:`size` filled with ``0``. +By default, the returned Tensor has the same :class:`torch.dtype` and +:class:`torch.device` as this tensor. + +Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. 
+ +Keyword args: + {dtype} + {device} + {requires_grad} + {layout} + {pin_memory} + +Example:: + + >>> tensor = torch.tensor((), dtype=torch.float64) + >>> tensor.new_zeros((2, 3)) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]], dtype=torch.float64) + +""".format( + **new_common_args + ), +) + +add_docstr_all( + "abs", + r""" +abs() -> Tensor + +See :func:`torch.abs` +""", +) + +add_docstr_all( + "abs_", + r""" +abs_() -> Tensor + +In-place version of :meth:`~Tensor.abs` +""", +) + +add_docstr_all( + "absolute", + r""" +absolute() -> Tensor + +Alias for :func:`abs` +""", +) + +add_docstr_all( + "absolute_", + r""" +absolute_() -> Tensor + +In-place version of :meth:`~Tensor.absolute` +Alias for :func:`abs_` +""", +) + +add_docstr_all( + "acos", + r""" +acos() -> Tensor + +See :func:`torch.acos` +""", +) + +add_docstr_all( + "acos_", + r""" +acos_() -> Tensor + +In-place version of :meth:`~Tensor.acos` +""", +) + +add_docstr_all( + "arccos", + r""" +arccos() -> Tensor + +See :func:`torch.arccos` +""", +) + +add_docstr_all( + "arccos_", + r""" +arccos_() -> Tensor + +In-place version of :meth:`~Tensor.arccos` +""", +) + +add_docstr_all( + "acosh", + r""" +acosh() -> Tensor + +See :func:`torch.acosh` +""", +) + +add_docstr_all( + "acosh_", + r""" +acosh_() -> Tensor + +In-place version of :meth:`~Tensor.acosh` +""", +) + +add_docstr_all( + "arccosh", + r""" +acosh() -> Tensor + +See :func:`torch.arccosh` +""", +) + +add_docstr_all( + "arccosh_", + r""" +acosh_() -> Tensor + +In-place version of :meth:`~Tensor.arccosh` +""", +) + +add_docstr_all( + "add", + r""" +add(other, *, alpha=1) -> Tensor + +Add a scalar or tensor to :attr:`self` tensor. If both :attr:`alpha` +and :attr:`other` are specified, each element of :attr:`other` is scaled by +:attr:`alpha` before being used. 
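+
+For illustration (a minimal sketch of the scalar case and of ``alpha`` scaling)::
+
+    >>> x = torch.tensor([1., 2., 3.])
+    >>> x.add(10)
+    tensor([11., 12., 13.])
+    >>> x.add(torch.tensor([1., 1., 1.]), alpha=2)
+    tensor([3., 4., 5.])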
+ +When :attr:`other` is a tensor, the shape of :attr:`other` must be +:ref:`broadcastable ` with the shape of the underlying +tensor + +See :func:`torch.add` +""", +) + +add_docstr_all( + "add_", + r""" +add_(other, *, alpha=1) -> Tensor + +In-place version of :meth:`~Tensor.add` +""", +) + +add_docstr_all( + "addbmm", + r""" +addbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor + +See :func:`torch.addbmm` +""", +) + +add_docstr_all( + "addbmm_", + r""" +addbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor + +In-place version of :meth:`~Tensor.addbmm` +""", +) + +add_docstr_all( + "addcdiv", + r""" +addcdiv(tensor1, tensor2, *, value=1) -> Tensor + +See :func:`torch.addcdiv` +""", +) + +add_docstr_all( + "addcdiv_", + r""" +addcdiv_(tensor1, tensor2, *, value=1) -> Tensor + +In-place version of :meth:`~Tensor.addcdiv` +""", +) + +add_docstr_all( + "addcmul", + r""" +addcmul(tensor1, tensor2, *, value=1) -> Tensor + +See :func:`torch.addcmul` +""", +) + +add_docstr_all( + "addcmul_", + r""" +addcmul_(tensor1, tensor2, *, value=1) -> Tensor + +In-place version of :meth:`~Tensor.addcmul` +""", +) + +add_docstr_all( + "addmm", + r""" +addmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor + +See :func:`torch.addmm` +""", +) + +add_docstr_all( + "addmm_", + r""" +addmm_(mat1, mat2, *, beta=1, alpha=1) -> Tensor + +In-place version of :meth:`~Tensor.addmm` +""", +) + +add_docstr_all( + "addmv", + r""" +addmv(mat, vec, *, beta=1, alpha=1) -> Tensor + +See :func:`torch.addmv` +""", +) + +add_docstr_all( + "addmv_", + r""" +addmv_(mat, vec, *, beta=1, alpha=1) -> Tensor + +In-place version of :meth:`~Tensor.addmv` +""", +) + +add_docstr_all( + "sspaddmm", + r""" +sspaddmm(mat1, mat2, *, beta=1, alpha=1) -> Tensor + +See :func:`torch.sspaddmm` +""", +) + +add_docstr_all( + "smm", + r""" +smm(mat) -> Tensor + +See :func:`torch.smm` +""", +) + +add_docstr_all( + "addr", + r""" +addr(vec1, vec2, *, beta=1, alpha=1) -> Tensor + +See :func:`torch.addr` +""", +) + +add_docstr_all( + "addr_", + r""" +addr_(vec1, vec2, *, beta=1, alpha=1) -> Tensor + +In-place version of :meth:`~Tensor.addr` +""", +) + +add_docstr_all( + "align_as", + r""" +align_as(other) -> Tensor + +Permutes the dimensions of the :attr:`self` tensor to match the dimension order +in the :attr:`other` tensor, adding size-one dims for any new names. + +This operation is useful for explicit broadcasting by names (see examples). + +All of the dims of :attr:`self` must be named in order to use this method. +The resulting tensor is a view on the original tensor. + +All dimension names of :attr:`self` must be present in ``other.names``. +:attr:`other` may contain named dimensions that are not in ``self.names``; +the output tensor has a size-one dimension for each of those new names. + +To align a tensor to a specific order, use :meth:`~Tensor.align_to`. 
+ +Examples:: + + # Example 1: Applying a mask + >>> mask = torch.randint(2, [127, 128], dtype=torch.bool).refine_names('W', 'H') + >>> imgs = torch.randn(32, 128, 127, 3, names=('N', 'H', 'W', 'C')) + >>> imgs.masked_fill_(mask.align_as(imgs), 0) + + + # Example 2: Applying a per-channel-scale + >>> def scale_channels(input, scale): + >>> scale = scale.refine_names('C') + >>> return input * scale.align_as(input) + + >>> num_channels = 3 + >>> scale = torch.randn(num_channels, names=('C',)) + >>> imgs = torch.rand(32, 128, 128, num_channels, names=('N', 'H', 'W', 'C')) + >>> more_imgs = torch.rand(32, num_channels, 128, 128, names=('N', 'C', 'H', 'W')) + >>> videos = torch.randn(3, num_channels, 128, 128, 128, names=('N', 'C', 'H', 'W', 'D')) + + # scale_channels is agnostic to the dimension order of the input + >>> scale_channels(imgs, scale) + >>> scale_channels(more_imgs, scale) + >>> scale_channels(videos, scale) + +.. warning:: + The named tensor API is experimental and subject to change. + +""", +) + +add_docstr_all( + "all", + r""" +all(dim=None, keepdim=False) -> Tensor + +See :func:`torch.all` +""", +) + +add_docstr_all( + "allclose", + r""" +allclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor + +See :func:`torch.allclose` +""", +) + +add_docstr_all( + "angle", + r""" +angle() -> Tensor + +See :func:`torch.angle` +""", +) + +add_docstr_all( + "any", + r""" +any(dim=None, keepdim=False) -> Tensor + +See :func:`torch.any` +""", +) + +add_docstr_all( + "apply_", + r""" +apply_(callable) -> Tensor + +Applies the function :attr:`callable` to each element in the tensor, replacing +each element with the value returned by :attr:`callable`. + +.. note:: + + This function only works with CPU tensors and should not be used in code + sections that require high performance. 
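+
+Example (a minimal illustration; ``apply_`` simply calls the Python
+:attr:`callable` on each element of a CPU tensor)::
+
+    >>> x = torch.tensor([1.0, 2.0, 3.0])
+    >>> x.apply_(lambda v: v * 2)
+    tensor([2., 4., 6.])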
+""", +) + +add_docstr_all( + "asin", + r""" +asin() -> Tensor + +See :func:`torch.asin` +""", +) + +add_docstr_all( + "asin_", + r""" +asin_() -> Tensor + +In-place version of :meth:`~Tensor.asin` +""", +) + +add_docstr_all( + "arcsin", + r""" +arcsin() -> Tensor + +See :func:`torch.arcsin` +""", +) + +add_docstr_all( + "arcsin_", + r""" +arcsin_() -> Tensor + +In-place version of :meth:`~Tensor.arcsin` +""", +) + +add_docstr_all( + "asinh", + r""" +asinh() -> Tensor + +See :func:`torch.asinh` +""", +) + +add_docstr_all( + "asinh_", + r""" +asinh_() -> Tensor + +In-place version of :meth:`~Tensor.asinh` +""", +) + +add_docstr_all( + "arcsinh", + r""" +arcsinh() -> Tensor + +See :func:`torch.arcsinh` +""", +) + +add_docstr_all( + "arcsinh_", + r""" +arcsinh_() -> Tensor + +In-place version of :meth:`~Tensor.arcsinh` +""", +) + +add_docstr_all( + "as_strided", + r""" +as_strided(size, stride, storage_offset=None) -> Tensor + +See :func:`torch.as_strided` +""", +) + +add_docstr_all( + "as_strided_", + r""" +as_strided_(size, stride, storage_offset=None) -> Tensor + +In-place version of :meth:`~Tensor.as_strided` +""", +) + +add_docstr_all( + "atan", + r""" +atan() -> Tensor + +See :func:`torch.atan` +""", +) + +add_docstr_all( + "atan_", + r""" +atan_() -> Tensor + +In-place version of :meth:`~Tensor.atan` +""", +) + +add_docstr_all( + "arctan", + r""" +arctan() -> Tensor + +See :func:`torch.arctan` +""", +) + +add_docstr_all( + "arctan_", + r""" +arctan_() -> Tensor + +In-place version of :meth:`~Tensor.arctan` +""", +) + +add_docstr_all( + "atan2", + r""" +atan2(other) -> Tensor + +See :func:`torch.atan2` +""", +) + +add_docstr_all( + "atan2_", + r""" +atan2_(other) -> Tensor + +In-place version of :meth:`~Tensor.atan2` +""", +) + +add_docstr_all( + "arctan2", + r""" +arctan2(other) -> Tensor + +See :func:`torch.arctan2` +""", +) + +add_docstr_all( + "arctan2_", + r""" +atan2_(other) -> Tensor + +In-place version of :meth:`~Tensor.arctan2` +""", +) + +add_docstr_all( + "atanh", + r""" +atanh() -> Tensor + +See :func:`torch.atanh` +""", +) + +add_docstr_all( + "atanh_", + r""" +atanh_(other) -> Tensor + +In-place version of :meth:`~Tensor.atanh` +""", +) + +add_docstr_all( + "arctanh", + r""" +arctanh() -> Tensor + +See :func:`torch.arctanh` +""", +) + +add_docstr_all( + "arctanh_", + r""" +arctanh_(other) -> Tensor + +In-place version of :meth:`~Tensor.arctanh` +""", +) + +add_docstr_all( + "baddbmm", + r""" +baddbmm(batch1, batch2, *, beta=1, alpha=1) -> Tensor + +See :func:`torch.baddbmm` +""", +) + +add_docstr_all( + "baddbmm_", + r""" +baddbmm_(batch1, batch2, *, beta=1, alpha=1) -> Tensor + +In-place version of :meth:`~Tensor.baddbmm` +""", +) + +add_docstr_all( + "bernoulli", + r""" +bernoulli(*, generator=None) -> Tensor + +Returns a result tensor where each :math:`\texttt{result[i]}` is independently +sampled from :math:`\text{Bernoulli}(\texttt{self[i]})`. :attr:`self` must have +floating point ``dtype``, and the result will have the same ``dtype``. + +See :func:`torch.bernoulli` +""", +) + +add_docstr_all( + "bernoulli_", + r""" +bernoulli_(p=0.5, *, generator=None) -> Tensor + +Fills each location of :attr:`self` with an independent sample from +:math:`\text{Bernoulli}(\texttt{p})`. :attr:`self` can have integral +``dtype``. + +:attr:`p` should either be a scalar or tensor containing probabilities to be +used for drawing the binary random number. 
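+
+For illustration (the draws are random, so the values shown are only
+representative)::
+
+    >>> torch.empty(3).bernoulli_(p=0.8)
+    tensor([1., 1., 0.])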
+ +If it is a tensor, the :math:`\text{i}^{th}` element of :attr:`self` tensor +will be set to a value sampled from +:math:`\text{Bernoulli}(\texttt{p\_tensor[i]})`. In this case `p` must have +floating point ``dtype``. + +See also :meth:`~Tensor.bernoulli` and :func:`torch.bernoulli` +""", +) + +add_docstr_all( + "bincount", + r""" +bincount(weights=None, minlength=0) -> Tensor + +See :func:`torch.bincount` +""", +) + +add_docstr_all( + "bitwise_not", + r""" +bitwise_not() -> Tensor + +See :func:`torch.bitwise_not` +""", +) + +add_docstr_all( + "bitwise_not_", + r""" +bitwise_not_() -> Tensor + +In-place version of :meth:`~Tensor.bitwise_not` +""", +) + +add_docstr_all( + "bitwise_and", + r""" +bitwise_and() -> Tensor + +See :func:`torch.bitwise_and` +""", +) + +add_docstr_all( + "bitwise_and_", + r""" +bitwise_and_() -> Tensor + +In-place version of :meth:`~Tensor.bitwise_and` +""", +) + +add_docstr_all( + "bitwise_or", + r""" +bitwise_or() -> Tensor + +See :func:`torch.bitwise_or` +""", +) + +add_docstr_all( + "bitwise_or_", + r""" +bitwise_or_() -> Tensor + +In-place version of :meth:`~Tensor.bitwise_or` +""", +) + +add_docstr_all( + "bitwise_xor", + r""" +bitwise_xor() -> Tensor + +See :func:`torch.bitwise_xor` +""", +) + +add_docstr_all( + "bitwise_xor_", + r""" +bitwise_xor_() -> Tensor + +In-place version of :meth:`~Tensor.bitwise_xor` +""", +) + +add_docstr_all( + "bitwise_left_shift", + r""" +bitwise_left_shift(other) -> Tensor + +See :func:`torch.bitwise_left_shift` +""", +) + +add_docstr_all( + "bitwise_left_shift_", + r""" +bitwise_left_shift_(other) -> Tensor + +In-place version of :meth:`~Tensor.bitwise_left_shift` +""", +) + +add_docstr_all( + "bitwise_right_shift", + r""" +bitwise_right_shift(other) -> Tensor + +See :func:`torch.bitwise_right_shift` +""", +) + +add_docstr_all( + "bitwise_right_shift_", + r""" +bitwise_right_shift_(other) -> Tensor + +In-place version of :meth:`~Tensor.bitwise_right_shift` +""", +) + +add_docstr_all( + "broadcast_to", + r""" +broadcast_to(shape) -> Tensor + +See :func:`torch.broadcast_to`. +""", +) + +add_docstr_all( + "logical_and", + r""" +logical_and() -> Tensor + +See :func:`torch.logical_and` +""", +) + +add_docstr_all( + "logical_and_", + r""" +logical_and_() -> Tensor + +In-place version of :meth:`~Tensor.logical_and` +""", +) + +add_docstr_all( + "logical_not", + r""" +logical_not() -> Tensor + +See :func:`torch.logical_not` +""", +) + +add_docstr_all( + "logical_not_", + r""" +logical_not_() -> Tensor + +In-place version of :meth:`~Tensor.logical_not` +""", +) + +add_docstr_all( + "logical_or", + r""" +logical_or() -> Tensor + +See :func:`torch.logical_or` +""", +) + +add_docstr_all( + "logical_or_", + r""" +logical_or_() -> Tensor + +In-place version of :meth:`~Tensor.logical_or` +""", +) + +add_docstr_all( + "logical_xor", + r""" +logical_xor() -> Tensor + +See :func:`torch.logical_xor` +""", +) + +add_docstr_all( + "logical_xor_", + r""" +logical_xor_() -> Tensor + +In-place version of :meth:`~Tensor.logical_xor` +""", +) + +add_docstr_all( + "bmm", + r""" +bmm(batch2) -> Tensor + +See :func:`torch.bmm` +""", +) + +add_docstr_all( + "cauchy_", + r""" +cauchy_(median=0, sigma=1, *, generator=None) -> Tensor + +Fills the tensor with numbers drawn from the Cauchy distribution: + +.. math:: + + f(x) = \dfrac{1}{\pi} \dfrac{\sigma}{(x - \text{median})^2 + \sigma^2} + +.. note:: + Sigma (:math:`\sigma`) is used to denote the scale parameter in Cauchy distribution. 
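+
+Example (illustrative; the samples are random, so the values shown are only
+representative)::
+
+    >>> a = torch.empty(3)
+    >>> a.cauchy_()
+    tensor([ 0.2934, -1.8721,  5.1204])
+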
+""", +) + +add_docstr_all( + "ceil", + r""" +ceil() -> Tensor + +See :func:`torch.ceil` +""", +) + +add_docstr_all( + "ceil_", + r""" +ceil_() -> Tensor + +In-place version of :meth:`~Tensor.ceil` +""", +) + +add_docstr_all( + "cholesky", + r""" +cholesky(upper=False) -> Tensor + +See :func:`torch.cholesky` +""", +) + +add_docstr_all( + "cholesky_solve", + r""" +cholesky_solve(input2, upper=False) -> Tensor + +See :func:`torch.cholesky_solve` +""", +) + +add_docstr_all( + "cholesky_inverse", + r""" +cholesky_inverse(upper=False) -> Tensor + +See :func:`torch.cholesky_inverse` +""", +) + +add_docstr_all( + "clamp", + r""" +clamp(min=None, max=None) -> Tensor + +See :func:`torch.clamp` +""", +) + +add_docstr_all( + "clamp_", + r""" +clamp_(min=None, max=None) -> Tensor + +In-place version of :meth:`~Tensor.clamp` +""", +) + +add_docstr_all( + "clip", + r""" +clip(min=None, max=None) -> Tensor + +Alias for :meth:`~Tensor.clamp`. +""", +) + +add_docstr_all( + "clip_", + r""" +clip_(min=None, max=None) -> Tensor + +Alias for :meth:`~Tensor.clamp_`. +""", +) + +add_docstr_all( + "clone", + r""" +clone(*, memory_format=torch.preserve_format) -> Tensor + +See :func:`torch.clone` +""".format( + **common_args + ), +) + +add_docstr_all( + "coalesce", + r""" +coalesce() -> Tensor + +Returns a coalesced copy of :attr:`self` if :attr:`self` is an +:ref:`uncoalesced tensor `. + +Returns :attr:`self` if :attr:`self` is a coalesced tensor. + +.. warning:: + Throws an error if :attr:`self` is not a sparse COO tensor. +""", +) + +add_docstr_all( + "contiguous", + r""" +contiguous(memory_format=torch.contiguous_format) -> Tensor + +Returns a contiguous in memory tensor containing the same data as :attr:`self` tensor. If +:attr:`self` tensor is already in the specified memory format, this function returns the +:attr:`self` tensor. + +Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. +""", +) + +add_docstr_all( + "copy_", + r""" +copy_(src, non_blocking=False) -> Tensor + +Copies the elements from :attr:`src` into :attr:`self` tensor and returns +:attr:`self`. + +The :attr:`src` tensor must be :ref:`broadcastable ` +with the :attr:`self` tensor. It may be of a different data type or reside on a +different device. + +Args: + src (Tensor): the source tensor to copy from + non_blocking (bool): if ``True`` and this copy is between CPU and GPU, + the copy may occur asynchronously with respect to the host. For other + cases, this argument has no effect. 
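+
+Example (a minimal sketch; the integer source is broadcast across rows and
+cast to the floating point dtype of :attr:`self`)::
+
+    >>> a = torch.zeros(2, 3)
+    >>> a.copy_(torch.tensor([1, 2, 3]))
+    tensor([[1., 2., 3.],
+            [1., 2., 3.]])
+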
+""", +) + +add_docstr_all( + "conj", + r""" +conj() -> Tensor + +See :func:`torch.conj` +""", +) + +add_docstr_all( + "conj_physical", + r""" +conj_physical() -> Tensor + +See :func:`torch.conj_physical` +""", +) + +add_docstr_all( + "conj_physical_", + r""" +conj_physical_() -> Tensor + +In-place version of :meth:`~Tensor.conj_physical` +""", +) + +add_docstr_all( + "resolve_conj", + r""" +resolve_conj() -> Tensor + +See :func:`torch.resolve_conj` +""", +) + +add_docstr_all( + "resolve_neg", + r""" +resolve_neg() -> Tensor + +See :func:`torch.resolve_neg` +""", +) + +add_docstr_all( + "copysign", + r""" +copysign(other) -> Tensor + +See :func:`torch.copysign` +""", +) + +add_docstr_all( + "copysign_", + r""" +copysign_(other) -> Tensor + +In-place version of :meth:`~Tensor.copysign` +""", +) + +add_docstr_all( + "cos", + r""" +cos() -> Tensor + +See :func:`torch.cos` +""", +) + +add_docstr_all( + "cos_", + r""" +cos_() -> Tensor + +In-place version of :meth:`~Tensor.cos` +""", +) + +add_docstr_all( + "cosh", + r""" +cosh() -> Tensor + +See :func:`torch.cosh` +""", +) + +add_docstr_all( + "cosh_", + r""" +cosh_() -> Tensor + +In-place version of :meth:`~Tensor.cosh` +""", +) + +add_docstr_all( + "cpu", + r""" +cpu(memory_format=torch.preserve_format) -> Tensor + +Returns a copy of this object in CPU memory. + +If this object is already in CPU memory and on the correct device, +then no copy is performed and the original object is returned. + +Args: + {memory_format} + +""".format( + **common_args + ), +) + +add_docstr_all( + "count_nonzero", + r""" +count_nonzero(dim=None) -> Tensor + +See :func:`torch.count_nonzero` +""", +) + +add_docstr_all( + "cov", + r""" +cov(*, correction=1, fweights=None, aweights=None) -> Tensor + +See :func:`torch.cov` +""", +) + +add_docstr_all( + "corrcoef", + r""" +corrcoef() -> Tensor + +See :func:`torch.corrcoef` +""", +) + +add_docstr_all( + "cross", + r""" +cross(other, dim=None) -> Tensor + +See :func:`torch.cross` +""", +) + +add_docstr_all( + "cuda", + r""" +cuda(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor + +Returns a copy of this object in CUDA memory. + +If this object is already in CUDA memory and on the correct device, +then no copy is performed and the original object is returned. + +Args: + device (:class:`torch.device`): The destination GPU device. + Defaults to the current CUDA device. + non_blocking (bool): If ``True`` and the source is in pinned memory, + the copy will be asynchronous with respect to the host. + Otherwise, the argument has no effect. Default: ``False``. + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "ipu", + r""" +ipu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor + +Returns a copy of this object in IPU memory. + +If this object is already in IPU memory and on the correct device, +then no copy is performed and the original object is returned. + +Args: + device (:class:`torch.device`): The destination IPU device. + Defaults to the current IPU device. + non_blocking (bool): If ``True`` and the source is in pinned memory, + the copy will be asynchronous with respect to the host. + Otherwise, the argument has no effect. Default: ``False``. + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "xpu", + r""" +xpu(device=None, non_blocking=False, memory_format=torch.preserve_format) -> Tensor + +Returns a copy of this object in XPU memory. 
+ +If this object is already in XPU memory and on the correct device, +then no copy is performed and the original object is returned. + +Args: + device (:class:`torch.device`): The destination XPU device. + Defaults to the current XPU device. + non_blocking (bool): If ``True`` and the source is in pinned memory, + the copy will be asynchronous with respect to the host. + Otherwise, the argument has no effect. Default: ``False``. + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "logcumsumexp", + r""" +logcumsumexp(dim) -> Tensor + +See :func:`torch.logcumsumexp` +""", +) + +add_docstr_all( + "cummax", + r""" +cummax(dim) -> (Tensor, Tensor) + +See :func:`torch.cummax` +""", +) + +add_docstr_all( + "cummin", + r""" +cummin(dim) -> (Tensor, Tensor) + +See :func:`torch.cummin` +""", +) + +add_docstr_all( + "cumprod", + r""" +cumprod(dim, dtype=None) -> Tensor + +See :func:`torch.cumprod` +""", +) + +add_docstr_all( + "cumprod_", + r""" +cumprod_(dim, dtype=None) -> Tensor + +In-place version of :meth:`~Tensor.cumprod` +""", +) + +add_docstr_all( + "cumsum", + r""" +cumsum(dim, dtype=None) -> Tensor + +See :func:`torch.cumsum` +""", +) + +add_docstr_all( + "cumsum_", + r""" +cumsum_(dim, dtype=None) -> Tensor + +In-place version of :meth:`~Tensor.cumsum` +""", +) + +add_docstr_all( + "data_ptr", + r""" +data_ptr() -> int + +Returns the address of the first element of :attr:`self` tensor. +""", +) + +add_docstr_all( + "dequantize", + r""" +dequantize() -> Tensor + +Given a quantized Tensor, dequantize it and return the dequantized float Tensor. +""", +) + +add_docstr_all( + "dense_dim", + r""" +dense_dim() -> int + +Return the number of dense dimensions in a :ref:`sparse tensor ` :attr:`self`. + +.. note:: + Returns ``len(self.shape)`` if :attr:`self` is not a sparse tensor. + +See also :meth:`Tensor.sparse_dim` and :ref:`hybrid tensors `. +""", +) + +add_docstr_all( + "diag", + r""" +diag(diagonal=0) -> Tensor + +See :func:`torch.diag` +""", +) + +add_docstr_all( + "diag_embed", + r""" +diag_embed(offset=0, dim1=-2, dim2=-1) -> Tensor + +See :func:`torch.diag_embed` +""", +) + +add_docstr_all( + "diagflat", + r""" +diagflat(offset=0) -> Tensor + +See :func:`torch.diagflat` +""", +) + +add_docstr_all( + "diagonal", + r""" +diagonal(offset=0, dim1=0, dim2=1) -> Tensor + +See :func:`torch.diagonal` +""", +) + +add_docstr_all( + "diagonal_scatter", + r""" +diagonal_scatter(src, offset=0, dim1=0, dim2=1) -> Tensor + +See :func:`torch.diagonal_scatter` +""", +) + +add_docstr_all( + "as_strided_scatter", + r""" +as_strided_scatter(src, size, stride, storage_offset=None) -> Tensor + +See :func:`torch.as_strided_scatter` +""", +) + +add_docstr_all( + "fill_diagonal_", + r""" +fill_diagonal_(fill_value, wrap=False) -> Tensor + +Fill the main diagonal of a tensor that has at least 2-dimensions. +When dims>2, all dimensions of input must be of equal length. +This function modifies the input tensor in-place, and returns the input tensor. + +Arguments: + fill_value (Scalar): the fill value + wrap (bool): the diagonal 'wrapped' after N columns for tall matrices. 
+ +Example:: + + >>> a = torch.zeros(3, 3) + >>> a.fill_diagonal_(5) + tensor([[5., 0., 0.], + [0., 5., 0.], + [0., 0., 5.]]) + >>> b = torch.zeros(7, 3) + >>> b.fill_diagonal_(5) + tensor([[5., 0., 0.], + [0., 5., 0.], + [0., 0., 5.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + >>> c = torch.zeros(7, 3) + >>> c.fill_diagonal_(5, wrap=True) + tensor([[5., 0., 0.], + [0., 5., 0.], + [0., 0., 5.], + [0., 0., 0.], + [5., 0., 0.], + [0., 5., 0.], + [0., 0., 5.]]) + +""", +) + +add_docstr_all( + "floor_divide", + r""" +floor_divide(value) -> Tensor + +See :func:`torch.floor_divide` +""", +) + +add_docstr_all( + "floor_divide_", + r""" +floor_divide_(value) -> Tensor + +In-place version of :meth:`~Tensor.floor_divide` +""", +) + +add_docstr_all( + "diff", + r""" +diff(n=1, dim=-1, prepend=None, append=None) -> Tensor + +See :func:`torch.diff` +""", +) + +add_docstr_all( + "digamma", + r""" +digamma() -> Tensor + +See :func:`torch.digamma` +""", +) + +add_docstr_all( + "digamma_", + r""" +digamma_() -> Tensor + +In-place version of :meth:`~Tensor.digamma` +""", +) + +add_docstr_all( + "dim", + r""" +dim() -> int + +Returns the number of dimensions of :attr:`self` tensor. +""", +) + +add_docstr_all( + "dist", + r""" +dist(other, p=2) -> Tensor + +See :func:`torch.dist` +""", +) + +add_docstr_all( + "div", + r""" +div(value, *, rounding_mode=None) -> Tensor + +See :func:`torch.div` +""", +) + +add_docstr_all( + "div_", + r""" +div_(value, *, rounding_mode=None) -> Tensor + +In-place version of :meth:`~Tensor.div` +""", +) + +add_docstr_all( + "divide", + r""" +divide(value, *, rounding_mode=None) -> Tensor + +See :func:`torch.divide` +""", +) + +add_docstr_all( + "divide_", + r""" +divide_(value, *, rounding_mode=None) -> Tensor + +In-place version of :meth:`~Tensor.divide` +""", +) + +add_docstr_all( + "dot", + r""" +dot(other) -> Tensor + +See :func:`torch.dot` +""", +) + +add_docstr_all( + "element_size", + r""" +element_size() -> int + +Returns the size in bytes of an individual element. 
+ +Example:: + + >>> torch.tensor([]).element_size() + 4 + >>> torch.tensor([], dtype=torch.uint8).element_size() + 1 + +""", +) + +add_docstr_all( + "eq", + r""" +eq(other) -> Tensor + +See :func:`torch.eq` +""", +) + +add_docstr_all( + "eq_", + r""" +eq_(other) -> Tensor + +In-place version of :meth:`~Tensor.eq` +""", +) + +add_docstr_all( + "equal", + r""" +equal(other) -> bool + +See :func:`torch.equal` +""", +) + +add_docstr_all( + "erf", + r""" +erf() -> Tensor + +See :func:`torch.erf` +""", +) + +add_docstr_all( + "erf_", + r""" +erf_() -> Tensor + +In-place version of :meth:`~Tensor.erf` +""", +) + +add_docstr_all( + "erfc", + r""" +erfc() -> Tensor + +See :func:`torch.erfc` +""", +) + +add_docstr_all( + "erfc_", + r""" +erfc_() -> Tensor + +In-place version of :meth:`~Tensor.erfc` +""", +) + +add_docstr_all( + "erfinv", + r""" +erfinv() -> Tensor + +See :func:`torch.erfinv` +""", +) + +add_docstr_all( + "erfinv_", + r""" +erfinv_() -> Tensor + +In-place version of :meth:`~Tensor.erfinv` +""", +) + +add_docstr_all( + "exp", + r""" +exp() -> Tensor + +See :func:`torch.exp` +""", +) + +add_docstr_all( + "exp_", + r""" +exp_() -> Tensor + +In-place version of :meth:`~Tensor.exp` +""", +) + +add_docstr_all( + "exp2", + r""" +exp2() -> Tensor + +See :func:`torch.exp2` +""", +) + +add_docstr_all( + "exp2_", + r""" +exp2_() -> Tensor + +In-place version of :meth:`~Tensor.exp2` +""", +) + +add_docstr_all( + "expm1", + r""" +expm1() -> Tensor + +See :func:`torch.expm1` +""", +) + +add_docstr_all( + "expm1_", + r""" +expm1_() -> Tensor + +In-place version of :meth:`~Tensor.expm1` +""", +) + +add_docstr_all( + "exponential_", + r""" +exponential_(lambd=1, *, generator=None) -> Tensor + +Fills :attr:`self` tensor with elements drawn from the PDF (probability density function): + +.. math:: + + f(x) = \lambda e^{-\lambda x}, x > 0 + +.. note:: + In probability theory, exponential distribution is supported on interval [0, :math:`\inf`) (i.e., :math:`x >= 0`) + implying that zero can be sampled from the exponential distribution. + However, :func:`torch.Tensor.exponential_` does not sample zero, + which means that its actual support is the interval (0, :math:`\inf`). + + Note that :func:`torch.distributions.exponential.Exponential` is supported on the interval [0, :math:`\inf`) and can sample zero. +""", +) + +add_docstr_all( + "fill_", + r""" +fill_(value) -> Tensor + +Fills :attr:`self` tensor with the specified value. 
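+
+Example (illustrative)::
+
+    >>> torch.zeros(2, 2).fill_(3)
+    tensor([[3., 3.],
+            [3., 3.]])
+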
+""", +) + +add_docstr_all( + "floor", + r""" +floor() -> Tensor + +See :func:`torch.floor` +""", +) + +add_docstr_all( + "flip", + r""" +flip(dims) -> Tensor + +See :func:`torch.flip` +""", +) + +add_docstr_all( + "fliplr", + r""" +fliplr() -> Tensor + +See :func:`torch.fliplr` +""", +) + +add_docstr_all( + "flipud", + r""" +flipud() -> Tensor + +See :func:`torch.flipud` +""", +) + +add_docstr_all( + "roll", + r""" +roll(shifts, dims) -> Tensor + +See :func:`torch.roll` +""", +) + +add_docstr_all( + "floor_", + r""" +floor_() -> Tensor + +In-place version of :meth:`~Tensor.floor` +""", +) + +add_docstr_all( + "fmod", + r""" +fmod(divisor) -> Tensor + +See :func:`torch.fmod` +""", +) + +add_docstr_all( + "fmod_", + r""" +fmod_(divisor) -> Tensor + +In-place version of :meth:`~Tensor.fmod` +""", +) + +add_docstr_all( + "frac", + r""" +frac() -> Tensor + +See :func:`torch.frac` +""", +) + +add_docstr_all( + "frac_", + r""" +frac_() -> Tensor + +In-place version of :meth:`~Tensor.frac` +""", +) + +add_docstr_all( + "frexp", + r""" +frexp(input) -> (Tensor mantissa, Tensor exponent) + +See :func:`torch.frexp` +""", +) + +add_docstr_all( + "flatten", + r""" +flatten(start_dim=0, end_dim=-1) -> Tensor + +See :func:`torch.flatten` +""", +) + +add_docstr_all( + "gather", + r""" +gather(dim, index) -> Tensor + +See :func:`torch.gather` +""", +) + +add_docstr_all( + "gcd", + r""" +gcd(other) -> Tensor + +See :func:`torch.gcd` +""", +) + +add_docstr_all( + "gcd_", + r""" +gcd_(other) -> Tensor + +In-place version of :meth:`~Tensor.gcd` +""", +) + +add_docstr_all( + "ge", + r""" +ge(other) -> Tensor + +See :func:`torch.ge`. +""", +) + +add_docstr_all( + "ge_", + r""" +ge_(other) -> Tensor + +In-place version of :meth:`~Tensor.ge`. +""", +) + +add_docstr_all( + "greater_equal", + r""" +greater_equal(other) -> Tensor + +See :func:`torch.greater_equal`. +""", +) + +add_docstr_all( + "greater_equal_", + r""" +greater_equal_(other) -> Tensor + +In-place version of :meth:`~Tensor.greater_equal`. +""", +) + +add_docstr_all( + "geometric_", + r""" +geometric_(p, *, generator=None) -> Tensor + +Fills :attr:`self` tensor with elements drawn from the geometric distribution: + +.. math:: + + P(X=k) = (1 - p)^{k - 1} p, k = 1, 2, ... + +.. note:: + :func:`torch.Tensor.geometric_` `k`-th trial is the first success hence draws samples in :math:`\{1, 2, \ldots\}`, whereas + :func:`torch.distributions.geometric.Geometric` :math:`(k+1)`-th trial is the first success + hence draws samples in :math:`\{0, 1, \ldots\}`. +""", +) + +add_docstr_all( + "geqrf", + r""" +geqrf() -> (Tensor, Tensor) + +See :func:`torch.geqrf` +""", +) + +add_docstr_all( + "ger", + r""" +ger(vec2) -> Tensor + +See :func:`torch.ger` +""", +) + +add_docstr_all( + "inner", + r""" +inner(other) -> Tensor + +See :func:`torch.inner`. +""", +) + +add_docstr_all( + "outer", + r""" +outer(vec2) -> Tensor + +See :func:`torch.outer`. 
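+
+Example (illustrative)::
+
+    >>> v1 = torch.tensor([1., 2., 3.])
+    >>> v2 = torch.tensor([1., 2.])
+    >>> v1.outer(v2)
+    tensor([[1., 2.],
+            [2., 4.],
+            [3., 6.]])
+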
+""", +) + +add_docstr_all( + "hypot", + r""" +hypot(other) -> Tensor + +See :func:`torch.hypot` +""", +) + +add_docstr_all( + "hypot_", + r""" +hypot_(other) -> Tensor + +In-place version of :meth:`~Tensor.hypot` +""", +) + +add_docstr_all( + "i0", + r""" +i0() -> Tensor + +See :func:`torch.i0` +""", +) + +add_docstr_all( + "i0_", + r""" +i0_() -> Tensor + +In-place version of :meth:`~Tensor.i0` +""", +) + +add_docstr_all( + "igamma", + r""" +igamma(other) -> Tensor + +See :func:`torch.igamma` +""", +) + +add_docstr_all( + "igamma_", + r""" +igamma_(other) -> Tensor + +In-place version of :meth:`~Tensor.igamma` +""", +) + +add_docstr_all( + "igammac", + r""" +igammac(other) -> Tensor +See :func:`torch.igammac` +""", +) + +add_docstr_all( + "igammac_", + r""" +igammac_(other) -> Tensor +In-place version of :meth:`~Tensor.igammac` +""", +) + +add_docstr_all( + "indices", + r""" +indices() -> Tensor + +Return the indices tensor of a :ref:`sparse COO tensor `. + +.. warning:: + Throws an error if :attr:`self` is not a sparse COO tensor. + +See also :meth:`Tensor.values`. + +.. note:: + This method can only be called on a coalesced sparse tensor. See + :meth:`Tensor.coalesce` for details. +""", +) + +add_docstr_all( + "get_device", + r""" +get_device() -> Device ordinal (Integer) + +For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides. +For CPU tensors, this function returns `-1`. + +Example:: + + >>> x = torch.randn(3, 4, 5, device='cuda:0') + >>> x.get_device() + 0 + >>> x.cpu().get_device() + -1 +""", +) + +add_docstr_all( + "values", + r""" +values() -> Tensor + +Return the values tensor of a :ref:`sparse COO tensor `. + +.. warning:: + Throws an error if :attr:`self` is not a sparse COO tensor. + +See also :meth:`Tensor.indices`. + +.. note:: + This method can only be called on a coalesced sparse tensor. See + :meth:`Tensor.coalesce` for details. +""", +) + +add_docstr_all( + "gt", + r""" +gt(other) -> Tensor + +See :func:`torch.gt`. +""", +) + +add_docstr_all( + "gt_", + r""" +gt_(other) -> Tensor + +In-place version of :meth:`~Tensor.gt`. +""", +) + +add_docstr_all( + "greater", + r""" +greater(other) -> Tensor + +See :func:`torch.greater`. +""", +) + +add_docstr_all( + "greater_", + r""" +greater_(other) -> Tensor + +In-place version of :meth:`~Tensor.greater`. +""", +) + +add_docstr_all( + "has_names", + r""" +Is ``True`` if any of this tensor's dimensions are named. Otherwise, is ``False``. +""", +) + +add_docstr_all( + "hardshrink", + r""" +hardshrink(lambd=0.5) -> Tensor + +See :func:`torch.nn.functional.hardshrink` +""", +) + +add_docstr_all( + "heaviside", + r""" +heaviside(values) -> Tensor + +See :func:`torch.heaviside` +""", +) + +add_docstr_all( + "heaviside_", + r""" +heaviside_(values) -> Tensor + +In-place version of :meth:`~Tensor.heaviside` +""", +) + +add_docstr_all( + "histc", + r""" +histc(bins=100, min=0, max=0) -> Tensor + +See :func:`torch.histc` +""", +) + +add_docstr_all( + "histogram", + r""" +histogram(input, bins, *, range=None, weight=None, density=False) -> (Tensor, Tensor) + +See :func:`torch.histogram` +""", +) + +add_docstr_all( + "index_add_", + r""" +index_add_(dim, index, source, *, alpha=1) -> Tensor + +Accumulate the elements of :attr:`alpha` times ``source`` into the :attr:`self` +tensor by adding to the indices in the order given in :attr:`index`. For example, +if ``dim == 0``, ``index[i] == j``, and ``alpha=-1``, then the ``i``\ th row of +``source`` is subtracted from the ``j``\ th row of :attr:`self`. 
+ +The :attr:`dim`\ th dimension of ``source`` must have the same size as the +length of :attr:`index` (which must be a vector), and all other dimensions must +match :attr:`self`, or an error will be raised. + +For a 3-D tensor the output is given as:: + + self[index[i], :, :] += alpha * src[i, :, :] # if dim == 0 + self[:, index[i], :] += alpha * src[:, i, :] # if dim == 1 + self[:, :, index[i]] += alpha * src[:, :, i] # if dim == 2 + +Note: + {forward_reproducibility_note} + +Args: + dim (int): dimension along which to index + index (Tensor): indices of ``source`` to select from, + should have dtype either `torch.int64` or `torch.int32` + source (Tensor): the tensor containing values to add + +Keyword args: + alpha (Number): the scalar multiplier for ``source`` + +Example:: + + >>> x = torch.ones(5, 3) + >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 4, 2]) + >>> x.index_add_(0, index, t) + tensor([[ 2., 3., 4.], + [ 1., 1., 1.], + [ 8., 9., 10.], + [ 1., 1., 1.], + [ 5., 6., 7.]]) + >>> x.index_add_(0, index, t, alpha=-1) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]]) +""".format( + **reproducibility_notes + ), +) + +add_docstr_all( + "index_copy_", + r""" +index_copy_(dim, index, tensor) -> Tensor + +Copies the elements of :attr:`tensor` into the :attr:`self` tensor by selecting +the indices in the order given in :attr:`index`. For example, if ``dim == 0`` +and ``index[i] == j``, then the ``i``\ th row of :attr:`tensor` is copied to the +``j``\ th row of :attr:`self`. + +The :attr:`dim`\ th dimension of :attr:`tensor` must have the same size as the +length of :attr:`index` (which must be a vector), and all other dimensions must +match :attr:`self`, or an error will be raised. + +.. note:: + If :attr:`index` contains duplicate entries, multiple elements from + :attr:`tensor` will be copied to the same index of :attr:`self`. The result + is nondeterministic since it depends on which copy occurs last. + +Args: + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`tensor` to select from + tensor (Tensor): the tensor containing values to copy + +Example:: + + >>> x = torch.zeros(5, 3) + >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 4, 2]) + >>> x.index_copy_(0, index, t) + tensor([[ 1., 2., 3.], + [ 0., 0., 0.], + [ 7., 8., 9.], + [ 0., 0., 0.], + [ 4., 5., 6.]]) +""", +) + +add_docstr_all( + "index_fill_", + r""" +index_fill_(dim, index, value) -> Tensor + +Fills the elements of the :attr:`self` tensor with value :attr:`value` by +selecting the indices in the order given in :attr:`index`. + +Args: + dim (int): dimension along which to index + index (LongTensor): indices of :attr:`self` tensor to fill in + value (float): the value to fill with + +Example:: + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.float) + >>> index = torch.tensor([0, 2]) + >>> x.index_fill_(1, index, -1) + tensor([[-1., 2., -1.], + [-1., 5., -1.], + [-1., 8., -1.]]) +""", +) + +add_docstr_all( + "index_put_", + r""" +index_put_(indices, values, accumulate=False) -> Tensor + +Puts values from the tensor :attr:`values` into the tensor :attr:`self` using +the indices specified in :attr:`indices` (which is a tuple of Tensors). The +expression ``tensor.index_put_(indices, values)`` is equivalent to +``tensor[indices] = values``. Returns :attr:`self`. 
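+
+For illustration (a minimal sketch with a one-element tuple of indices)::
+
+    >>> x = torch.zeros(3)
+    >>> x.index_put_((torch.tensor([0, 2]),), torch.tensor([1., 2.]))
+    tensor([1., 0., 2.])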
+ +If :attr:`accumulate` is ``True``, the elements in :attr:`values` are added to +:attr:`self`. If accumulate is ``False``, the behavior is undefined if indices +contain duplicate elements. + +Args: + indices (tuple of LongTensor): tensors used to index into `self`. + values (Tensor): tensor of same dtype as `self`. + accumulate (bool): whether to accumulate into self +""", +) + +add_docstr_all( + "index_put", + r""" +index_put(indices, values, accumulate=False) -> Tensor + +Out-place version of :meth:`~Tensor.index_put_`. +""", +) + +add_docstr_all( + "index_reduce_", + r""" +index_reduce_(dim, index, source, reduce, *, include_self=True) -> Tensor + +Accumulate the elements of ``source`` into the :attr:`self` +tensor by accumulating to the indices in the order given in :attr:`index` +using the reduction given by the ``reduce`` argument. For example, if ``dim == 0``, +``index[i] == j``, ``reduce == prod`` and ``include_self == True`` then the ``i``\ th +row of ``source`` is multiplied by the ``j``\ th row of :attr:`self`. If +:obj:`include_self="True"`, the values in the :attr:`self` tensor are included +in the reduction, otherwise, rows in the :attr:`self` tensor that are accumulated +to are treated as if they were filled with the reduction identites. + +The :attr:`dim`\ th dimension of ``source`` must have the same size as the +length of :attr:`index` (which must be a vector), and all other dimensions must +match :attr:`self`, or an error will be raised. + +For a 3-D tensor with :obj:`reduce="prod"` and :obj:`include_self=True` the +output is given as:: + + self[index[i], :, :] *= src[i, :, :] # if dim == 0 + self[:, index[i], :] *= src[:, i, :] # if dim == 1 + self[:, :, index[i]] *= src[:, :, i] # if dim == 2 + +Note: + {forward_reproducibility_note} + +.. note:: + + This function only supports floating point tensors. + +.. warning:: + + This function is in beta and may change in the near future. + +Args: + dim (int): dimension along which to index + index (Tensor): indices of ``source`` to select from, + should have dtype either `torch.int64` or `torch.int32` + source (FloatTensor): the tensor containing values to accumulate + reduce (str): the reduction operation to apply + (:obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`) + +Keyword args: + include_self (bool): whether the elements from the ``self`` tensor are + included in the reduction + +Example:: + + >>> x = torch.empty(5, 3).fill_(2) + >>> t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=torch.float) + >>> index = torch.tensor([0, 4, 2, 0]) + >>> x.index_reduce_(0, index, t, 'prod') + tensor([[20., 44., 72.], + [ 2., 2., 2.], + [14., 16., 18.], + [ 2., 2., 2.], + [ 8., 10., 12.]]) + >>> x = torch.empty(5, 3).fill_(2) + >>> x.index_reduce_(0, index, t, 'prod', include_self=False) + tensor([[10., 22., 36.], + [ 2., 2., 2.], + [ 7., 8., 9.], + [ 2., 2., 2.], + [ 4., 5., 6.]]) +""".format( + **reproducibility_notes + ), +) + +add_docstr_all( + "index_select", + r""" +index_select(dim, index) -> Tensor + +See :func:`torch.index_select` +""", +) + +add_docstr_all( + "sparse_mask", + r""" +sparse_mask(mask) -> Tensor + +Returns a new :ref:`sparse tensor ` with values from a +strided tensor :attr:`self` filtered by the indices of the sparse +tensor :attr:`mask`. The values of :attr:`mask` sparse tensor are +ignored. :attr:`self` and :attr:`mask` tensors must have the same +shape. + +.. note:: + + The returned sparse tensor might contain duplicate values if :attr:`mask` + is not coalesced. 
It is therefore advisable to pass ``mask.coalesce()`` + if such behavior is not desired. + +.. note:: + + The returned sparse tensor has the same indices as the sparse tensor + :attr:`mask`, even when the corresponding values in :attr:`self` are + zeros. + +Args: + mask (Tensor): a sparse tensor whose indices are used as a filter + +Example:: + + >>> nse = 5 + >>> dims = (5, 5, 2, 2) + >>> I = torch.cat([torch.randint(0, dims[0], size=(nse,)), + ... torch.randint(0, dims[1], size=(nse,))], 0).reshape(2, nse) + >>> V = torch.randn(nse, dims[2], dims[3]) + >>> S = torch.sparse_coo_tensor(I, V, dims).coalesce() + >>> D = torch.randn(dims) + >>> D.sparse_mask(S) + tensor(indices=tensor([[0, 0, 0, 2], + [0, 1, 4, 3]]), + values=tensor([[[ 1.6550, 0.2397], + [-0.1611, -0.0779]], + + [[ 0.2326, -1.0558], + [ 1.4711, 1.9678]], + + [[-0.5138, -0.0411], + [ 1.9417, 0.5158]], + + [[ 0.0793, 0.0036], + [-0.2569, -0.1055]]]), + size=(5, 5, 2, 2), nnz=4, layout=torch.sparse_coo) +""", +) + +add_docstr_all( + "inverse", + r""" +inverse() -> Tensor + +See :func:`torch.inverse` +""", +) + +add_docstr_all( + "isnan", + r""" +isnan() -> Tensor + +See :func:`torch.isnan` +""", +) + +add_docstr_all( + "isinf", + r""" +isinf() -> Tensor + +See :func:`torch.isinf` +""", +) + +add_docstr_all( + "isposinf", + r""" +isposinf() -> Tensor + +See :func:`torch.isposinf` +""", +) + +add_docstr_all( + "isneginf", + r""" +isneginf() -> Tensor + +See :func:`torch.isneginf` +""", +) + +add_docstr_all( + "isfinite", + r""" +isfinite() -> Tensor + +See :func:`torch.isfinite` +""", +) + +add_docstr_all( + "isclose", + r""" +isclose(other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor + +See :func:`torch.isclose` +""", +) + +add_docstr_all( + "isreal", + r""" +isreal() -> Tensor + +See :func:`torch.isreal` +""", +) + +add_docstr_all( + "is_coalesced", + r""" +is_coalesced() -> bool + +Returns ``True`` if :attr:`self` is a :ref:`sparse COO tensor +` that is coalesced, ``False`` otherwise. + +.. warning:: + Throws an error if :attr:`self` is not a sparse COO tensor. + +See :meth:`coalesce` and :ref:`uncoalesced tensors `. +""", +) + +add_docstr_all( + "is_contiguous", + r""" +is_contiguous(memory_format=torch.contiguous_format) -> bool + +Returns True if :attr:`self` tensor is contiguous in memory in the order specified +by memory format. + +Args: + memory_format (:class:`torch.memory_format`, optional): Specifies memory allocation + order. Default: ``torch.contiguous_format``. +""", +) + +add_docstr_all( + "is_pinned", + r""" +Returns true if this tensor resides in pinned memory. +""", +) + +add_docstr_all( + "is_floating_point", + r""" +is_floating_point() -> bool + +Returns True if the data type of :attr:`self` is a floating point data type. +""", +) + +add_docstr_all( + "is_complex", + r""" +is_complex() -> bool + +Returns True if the data type of :attr:`self` is a complex data type. +""", +) + +add_docstr_all( + "is_inference", + r""" +is_inference() -> bool + +See :func:`torch.is_inference` +""", +) + +add_docstr_all( + "is_conj", + r""" +is_conj() -> bool + +Returns True if the conjugate bit of :attr:`self` is set to true. +""", +) + +add_docstr_all( + "is_neg", + r""" +is_neg() -> bool + +Returns True if the negative bit of :attr:`self` is set to true. +""", +) + +add_docstr_all( + "is_signed", + r""" +is_signed() -> bool + +Returns True if the data type of :attr:`self` is a signed data type. 
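+
+Example (illustrative)::
+
+    >>> torch.tensor([1, 2, 3]).is_signed()
+    True
+    >>> torch.tensor([1, 2, 3], dtype=torch.uint8).is_signed()
+    False
+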
+""", +) + +add_docstr_all( + "is_set_to", + r""" +is_set_to(tensor) -> bool + +Returns True if both tensors are pointing to the exact same memory (same +storage, offset, size and stride). +""", +) + +add_docstr_all( + "item", + r""" +item() -> number + +Returns the value of this tensor as a standard Python number. This only works +for tensors with one element. For other cases, see :meth:`~Tensor.tolist`. + +This operation is not differentiable. + +Example:: + + >>> x = torch.tensor([1.0]) + >>> x.item() + 1.0 + +""", +) + +add_docstr_all( + "kron", + r""" +kron(other) -> Tensor + +See :func:`torch.kron` +""", +) + +add_docstr_all( + "kthvalue", + r""" +kthvalue(k, dim=None, keepdim=False) -> (Tensor, LongTensor) + +See :func:`torch.kthvalue` +""", +) + +add_docstr_all( + "ldexp", + r""" +ldexp(other) -> Tensor + +See :func:`torch.ldexp` +""", +) + +add_docstr_all( + "ldexp_", + r""" +ldexp_(other) -> Tensor + +In-place version of :meth:`~Tensor.ldexp` +""", +) + +add_docstr_all( + "lcm", + r""" +lcm(other) -> Tensor + +See :func:`torch.lcm` +""", +) + +add_docstr_all( + "lcm_", + r""" +lcm_(other) -> Tensor + +In-place version of :meth:`~Tensor.lcm` +""", +) + +add_docstr_all( + "le", + r""" +le(other) -> Tensor + +See :func:`torch.le`. +""", +) + +add_docstr_all( + "le_", + r""" +le_(other) -> Tensor + +In-place version of :meth:`~Tensor.le`. +""", +) + +add_docstr_all( + "less_equal", + r""" +less_equal(other) -> Tensor + +See :func:`torch.less_equal`. +""", +) + +add_docstr_all( + "less_equal_", + r""" +less_equal_(other) -> Tensor + +In-place version of :meth:`~Tensor.less_equal`. +""", +) + +add_docstr_all( + "lerp", + r""" +lerp(end, weight) -> Tensor + +See :func:`torch.lerp` +""", +) + +add_docstr_all( + "lerp_", + r""" +lerp_(end, weight) -> Tensor + +In-place version of :meth:`~Tensor.lerp` +""", +) + +add_docstr_all( + "lgamma", + r""" +lgamma() -> Tensor + +See :func:`torch.lgamma` +""", +) + +add_docstr_all( + "lgamma_", + r""" +lgamma_() -> Tensor + +In-place version of :meth:`~Tensor.lgamma` +""", +) + +add_docstr_all( + "log", + r""" +log() -> Tensor + +See :func:`torch.log` +""", +) + +add_docstr_all( + "log_", + r""" +log_() -> Tensor + +In-place version of :meth:`~Tensor.log` +""", +) + +add_docstr_all( + "log10", + r""" +log10() -> Tensor + +See :func:`torch.log10` +""", +) + +add_docstr_all( + "log10_", + r""" +log10_() -> Tensor + +In-place version of :meth:`~Tensor.log10` +""", +) + +add_docstr_all( + "log1p", + r""" +log1p() -> Tensor + +See :func:`torch.log1p` +""", +) + +add_docstr_all( + "log1p_", + r""" +log1p_() -> Tensor + +In-place version of :meth:`~Tensor.log1p` +""", +) + +add_docstr_all( + "log2", + r""" +log2() -> Tensor + +See :func:`torch.log2` +""", +) + +add_docstr_all( + "log2_", + r""" +log2_() -> Tensor + +In-place version of :meth:`~Tensor.log2` +""", +) + +add_docstr_all( + "logaddexp", + r""" +logaddexp(other) -> Tensor + +See :func:`torch.logaddexp` +""", +) + +add_docstr_all( + "logaddexp2", + r""" +logaddexp2(other) -> Tensor + +See :func:`torch.logaddexp2` +""", +) + +add_docstr_all( + "log_normal_", + r""" +log_normal_(mean=1, std=2, *, generator=None) + +Fills :attr:`self` tensor with numbers samples from the log-normal distribution +parameterized by the given mean :math:`\mu` and standard deviation +:math:`\sigma`. Note that :attr:`mean` and :attr:`std` are the mean and +standard deviation of the underlying normal distribution, and not of the +returned distribution: + +.. 
math:: + + f(x) = \dfrac{1}{x \sigma \sqrt{2\pi}}\ e^{-\frac{(\ln x - \mu)^2}{2\sigma^2}} +""", +) + +add_docstr_all( + "logsumexp", + r""" +logsumexp(dim, keepdim=False) -> Tensor + +See :func:`torch.logsumexp` +""", +) + +add_docstr_all( + "lt", + r""" +lt(other) -> Tensor + +See :func:`torch.lt`. +""", +) + +add_docstr_all( + "lt_", + r""" +lt_(other) -> Tensor + +In-place version of :meth:`~Tensor.lt`. +""", +) + +add_docstr_all( + "less", + r""" +lt(other) -> Tensor + +See :func:`torch.less`. +""", +) + +add_docstr_all( + "less_", + r""" +less_(other) -> Tensor + +In-place version of :meth:`~Tensor.less`. +""", +) + +add_docstr_all( + "lu_solve", + r""" +lu_solve(LU_data, LU_pivots) -> Tensor + +See :func:`torch.lu_solve` +""", +) + +add_docstr_all( + "map_", + r""" +map_(tensor, callable) + +Applies :attr:`callable` for each element in :attr:`self` tensor and the given +:attr:`tensor` and stores the results in :attr:`self` tensor. :attr:`self` tensor and +the given :attr:`tensor` must be :ref:`broadcastable `. + +The :attr:`callable` should have the signature:: + + def callable(a, b) -> number +""", +) + +add_docstr_all( + "masked_scatter_", + r""" +masked_scatter_(mask, source) + +Copies elements from :attr:`source` into :attr:`self` tensor at positions where +the :attr:`mask` is True. Elements from :attr:`source` are copied into :attr:`self` +starting at position 0 of :attr:`source` and continuing in order one-by-one for each +occurrence of :attr:`mask` being True. +The shape of :attr:`mask` must be :ref:`broadcastable ` +with the shape of the underlying tensor. The :attr:`source` should have at least +as many elements as the number of ones in :attr:`mask`. + +Args: + mask (BoolTensor): the boolean mask + source (Tensor): the tensor to copy from + +.. note:: + + The :attr:`mask` operates on the :attr:`self` tensor, not on the given + :attr:`source` tensor. + +Example: + + >>> self = torch.tensor([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]) + >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]]) + >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) + >>> self.masked_scatter_(mask, source) + tensor([[0, 0, 0, 0, 1], + [2, 3, 0, 4, 5]]) + +""", +) + +add_docstr_all( + "masked_fill_", + r""" +masked_fill_(mask, value) + +Fills elements of :attr:`self` tensor with :attr:`value` where :attr:`mask` is +True. The shape of :attr:`mask` must be +:ref:`broadcastable ` with the shape of the underlying +tensor. + +Args: + mask (BoolTensor): the boolean mask + value (float): the value to fill in with +""", +) + +add_docstr_all( + "masked_select", + r""" +masked_select(mask) -> Tensor + +See :func:`torch.masked_select` +""", +) + +add_docstr_all( + "matrix_power", + r""" +matrix_power(n) -> Tensor + +.. note:: :meth:`~Tensor.matrix_power` is deprecated, use :func:`torch.linalg.matrix_power` instead. 
+ +Alias for :func:`torch.linalg.matrix_power` +""", +) + +add_docstr_all( + "matrix_exp", + r""" +matrix_exp() -> Tensor + +See :func:`torch.matrix_exp` +""", +) + +add_docstr_all( + "max", + r""" +max(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + +See :func:`torch.max` +""", +) + +add_docstr_all( + "amax", + r""" +amax(dim=None, keepdim=False) -> Tensor + +See :func:`torch.amax` +""", +) + +add_docstr_all( + "maximum", + r""" +maximum(other) -> Tensor + +See :func:`torch.maximum` +""", +) + +add_docstr_all( + "fmax", + r""" +fmax(other) -> Tensor + +See :func:`torch.fmax` +""", +) + +add_docstr_all( + "argmax", + r""" +argmax(dim=None, keepdim=False) -> LongTensor + +See :func:`torch.argmax` +""", +) + +add_docstr_all( + "argwhere", + r""" +argwhere() -> Tensor + +See :func:`torch.argwhere` +""", +) + +add_docstr_all( + "mean", + r""" +mean(dim=None, keepdim=False, *, dtype=None) -> Tensor + +See :func:`torch.mean` +""", +) + +add_docstr_all( + "nanmean", + r""" +nanmean(dim=None, keepdim=False, *, dtype=None) -> Tensor + +See :func:`torch.nanmean` +""", +) + +add_docstr_all( + "median", + r""" +median(dim=None, keepdim=False) -> (Tensor, LongTensor) + +See :func:`torch.median` +""", +) + +add_docstr_all( + "nanmedian", + r""" +nanmedian(dim=None, keepdim=False) -> (Tensor, LongTensor) + +See :func:`torch.nanmedian` +""", +) + +add_docstr_all( + "min", + r""" +min(dim=None, keepdim=False) -> Tensor or (Tensor, Tensor) + +See :func:`torch.min` +""", +) + +add_docstr_all( + "amin", + r""" +amin(dim=None, keepdim=False) -> Tensor + +See :func:`torch.amin` +""", +) + +add_docstr_all( + "minimum", + r""" +minimum(other) -> Tensor + +See :func:`torch.minimum` +""", +) + +add_docstr_all( + "aminmax", + r""" +aminmax(*, dim=None, keepdim=False) -> (Tensor min, Tensor max) + +See :func:`torch.aminmax` +""", +) + +add_docstr_all( + "fmin", + r""" +fmin(other) -> Tensor + +See :func:`torch.fmin` +""", +) + +add_docstr_all( + "argmin", + r""" +argmin(dim=None, keepdim=False) -> LongTensor + +See :func:`torch.argmin` +""", +) + +add_docstr_all( + "mm", + r""" +mm(mat2) -> Tensor + +See :func:`torch.mm` +""", +) + +add_docstr_all( + "mode", + r""" +mode(dim=None, keepdim=False) -> (Tensor, LongTensor) + +See :func:`torch.mode` +""", +) + +add_docstr_all( + "movedim", + r""" +movedim(source, destination) -> Tensor + +See :func:`torch.movedim` +""", +) + +add_docstr_all( + "moveaxis", + r""" +moveaxis(source, destination) -> Tensor + +See :func:`torch.moveaxis` +""", +) + +add_docstr_all( + "mul", + r""" +mul(value) -> Tensor + +See :func:`torch.mul`. +""", +) + +add_docstr_all( + "mul_", + r""" +mul_(value) -> Tensor + +In-place version of :meth:`~Tensor.mul`. +""", +) + +add_docstr_all( + "multiply", + r""" +multiply(value) -> Tensor + +See :func:`torch.multiply`. +""", +) + +add_docstr_all( + "multiply_", + r""" +multiply_(value) -> Tensor + +In-place version of :meth:`~Tensor.multiply`. +""", +) + +add_docstr_all( + "multinomial", + r""" +multinomial(num_samples, replacement=False, *, generator=None) -> Tensor + +See :func:`torch.multinomial` +""", +) + +add_docstr_all( + "mv", + r""" +mv(vec) -> Tensor + +See :func:`torch.mv` +""", +) + +add_docstr_all( + "mvlgamma", + r""" +mvlgamma(p) -> Tensor + +See :func:`torch.mvlgamma` +""", +) + +add_docstr_all( + "mvlgamma_", + r""" +mvlgamma_(p) -> Tensor + +In-place version of :meth:`~Tensor.mvlgamma` +""", +) + +add_docstr_all( + "narrow", + r""" +narrow(dimension, start, length) -> Tensor + +See :func:`torch.narrow`. 
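+
+Example (an illustrative sketch; any 2-D tensor behaves the same way)::
+
+    >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
+    >>> x.narrow(0, 0, 2)   # rows 0..1
+    tensor([[1, 2, 3],
+            [4, 5, 6]])
+    >>> x.narrow(1, 1, 2)   # columns 1..2
+    tensor([[2, 3],
+            [5, 6],
+            [8, 9]])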
+""", +) + +add_docstr_all( + "narrow_copy", + r""" +narrow_copy(dimension, start, length) -> Tensor + +See :func:`torch.narrow_copy`. +""", +) + +add_docstr_all( + "ndimension", + r""" +ndimension() -> int + +Alias for :meth:`~Tensor.dim()` +""", +) + +add_docstr_all( + "nan_to_num", + r""" +nan_to_num(nan=0.0, posinf=None, neginf=None) -> Tensor + +See :func:`torch.nan_to_num`. +""", +) + +add_docstr_all( + "nan_to_num_", + r""" +nan_to_num_(nan=0.0, posinf=None, neginf=None) -> Tensor + +In-place version of :meth:`~Tensor.nan_to_num`. +""", +) + +add_docstr_all( + "ne", + r""" +ne(other) -> Tensor + +See :func:`torch.ne`. +""", +) + +add_docstr_all( + "ne_", + r""" +ne_(other) -> Tensor + +In-place version of :meth:`~Tensor.ne`. +""", +) + +add_docstr_all( + "not_equal", + r""" +not_equal(other) -> Tensor + +See :func:`torch.not_equal`. +""", +) + +add_docstr_all( + "not_equal_", + r""" +not_equal_(other) -> Tensor + +In-place version of :meth:`~Tensor.not_equal`. +""", +) + +add_docstr_all( + "neg", + r""" +neg() -> Tensor + +See :func:`torch.neg` +""", +) + +add_docstr_all( + "negative", + r""" +negative() -> Tensor + +See :func:`torch.negative` +""", +) + +add_docstr_all( + "neg_", + r""" +neg_() -> Tensor + +In-place version of :meth:`~Tensor.neg` +""", +) + +add_docstr_all( + "negative_", + r""" +negative_() -> Tensor + +In-place version of :meth:`~Tensor.negative` +""", +) + +add_docstr_all( + "nelement", + r""" +nelement() -> int + +Alias for :meth:`~Tensor.numel` +""", +) + +add_docstr_all( + "nextafter", + r""" +nextafter(other) -> Tensor +See :func:`torch.nextafter` +""", +) + +add_docstr_all( + "nextafter_", + r""" +nextafter_(other) -> Tensor +In-place version of :meth:`~Tensor.nextafter` +""", +) + +add_docstr_all( + "nonzero", + r""" +nonzero() -> LongTensor + +See :func:`torch.nonzero` +""", +) + +add_docstr_all( + "nonzero_static", + r""" +nonzero_static(input, *, size, fill_value=-1) -> Tensor + +Returns a 2-D tensor where each row is the index for a non-zero value. +The returned Tensor has the same `torch.dtype` as `torch.nonzero()`. + +Args: + input (Tensor): the input tensor to count non-zero elements. + +Keyword args: + size (int): the size of non-zero elements expected to be included in the out + tensor. Pad the out tensor with `fill_value` if the `size` is larger + than total number of non-zero elements, truncate out tensor if `size` + is smaller. The size must be a non-negative integer. + fill_value (int): the value to fill the output tensor with when `size` is larger + than the total number of non-zero elements. Default is `-1` to represent + invalid index. 
+ +Example: + + # Example 1: Padding + >>> input_tensor = torch.tensor([[1, 0], [3, 2]]) + >>> static_size = 4 + >>> t = torch.nonzero_static(input_tensor, size = static_size) + tensor([[ 0, 0], + [ 1, 0], + [ 1, 1], + [ -1, -1]], dtype=torch.int64) + + # Example 2: Truncating + >>> input_tensor = torch.tensor([[1, 0], [3, 2]]) + >>> static_size = 2 + >>> t = torch.nonzero_static(input_tensor, size = static_size) + tensor([[ 0, 0], + [ 1, 0]], dtype=torch.int64) + + # Example 3: 0 size + >>> input_tensor = torch.tensor([10]) + >>> static_size = 0 + >>> t = torch.nonzero_static(input_tensor, size = static_size) + tensor([], size=(0, 1), dtype=torch.int64) + + # Example 4: 0 rank input + >>> input_tensor = torch.tensor(10) + >>> static_size = 2 + >>> t = torch.nonzero_static(input_tensor, size = static_size) + tensor([], size=(2, 0), dtype=torch.int64) +""", +) + +add_docstr_all( + "norm", + r""" +norm(p=2, dim=None, keepdim=False) -> Tensor + +See :func:`torch.norm` +""", +) + +add_docstr_all( + "normal_", + r""" +normal_(mean=0, std=1, *, generator=None) -> Tensor + +Fills :attr:`self` tensor with elements samples from the normal distribution +parameterized by :attr:`mean` and :attr:`std`. +""", +) + +add_docstr_all( + "numel", + r""" +numel() -> int + +See :func:`torch.numel` +""", +) + +add_docstr_all( + "numpy", + r""" +numpy(*, force=False) -> numpy.ndarray + +Returns the tensor as a NumPy :class:`ndarray`. + +If :attr:`force` is ``False`` (the default), the conversion +is performed only if the tensor is on the CPU, does not require grad, +does not have its conjugate bit set, and is a dtype and layout that +NumPy supports. The returned ndarray and the tensor will share their +storage, so changes to the tensor will be reflected in the ndarray +and vice versa. + +If :attr:`force` is ``True`` this is equivalent to +calling ``t.detach().cpu().resolve_conj().resolve_neg().numpy()``. +If the tensor isn't on the CPU or the conjugate or negative bit is set, +the tensor won't share its storage with the returned ndarray. +Setting :attr:`force` to ``True`` can be a useful shorthand. + +Args: + force (bool): if ``True``, the ndarray may be a copy of the tensor + instead of always sharing memory, defaults to ``False``. 
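+
+Example (an illustrative sketch of the shared-memory behaviour for a CPU tensor)::
+
+    >>> t = torch.tensor([1.0, 2.0])
+    >>> a = t.numpy()   # no copy: ``a`` and ``t`` share storage
+    >>> a[0] = 10.0
+    >>> t               # the change is visible through the tensor
+    tensor([10.,  2.])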
+""", +) + +add_docstr_all( + "orgqr", + r""" +orgqr(input2) -> Tensor + +See :func:`torch.orgqr` +""", +) + +add_docstr_all( + "ormqr", + r""" +ormqr(input2, input3, left=True, transpose=False) -> Tensor + +See :func:`torch.ormqr` +""", +) + +add_docstr_all( + "permute", + r""" +permute(*dims) -> Tensor + +See :func:`torch.permute` +""", +) + +add_docstr_all( + "polygamma", + r""" +polygamma(n) -> Tensor + +See :func:`torch.polygamma` +""", +) + +add_docstr_all( + "polygamma_", + r""" +polygamma_(n) -> Tensor + +In-place version of :meth:`~Tensor.polygamma` +""", +) + +add_docstr_all( + "positive", + r""" +positive() -> Tensor + +See :func:`torch.positive` +""", +) + +add_docstr_all( + "pow", + r""" +pow(exponent) -> Tensor + +See :func:`torch.pow` +""", +) + +add_docstr_all( + "pow_", + r""" +pow_(exponent) -> Tensor + +In-place version of :meth:`~Tensor.pow` +""", +) + +add_docstr_all( + "float_power", + r""" +float_power(exponent) -> Tensor + +See :func:`torch.float_power` +""", +) + +add_docstr_all( + "float_power_", + r""" +float_power_(exponent) -> Tensor + +In-place version of :meth:`~Tensor.float_power` +""", +) + +add_docstr_all( + "prod", + r""" +prod(dim=None, keepdim=False, dtype=None) -> Tensor + +See :func:`torch.prod` +""", +) + +add_docstr_all( + "put_", + r""" +put_(index, source, accumulate=False) -> Tensor + +Copies the elements from :attr:`source` into the positions specified by +:attr:`index`. For the purpose of indexing, the :attr:`self` tensor is treated as if +it were a 1-D tensor. + +:attr:`index` and :attr:`source` need to have the same number of elements, but not necessarily +the same shape. + +If :attr:`accumulate` is ``True``, the elements in :attr:`source` are added to +:attr:`self`. If accumulate is ``False``, the behavior is undefined if :attr:`index` +contain duplicate elements. + +Args: + index (LongTensor): the indices into self + source (Tensor): the tensor containing values to copy from + accumulate (bool): whether to accumulate into self + +Example:: + + >>> src = torch.tensor([[4, 3, 5], + ... [6, 7, 8]]) + >>> src.put_(torch.tensor([1, 3]), torch.tensor([9, 10])) + tensor([[ 4, 9, 5], + [ 10, 7, 8]]) +""", +) + +add_docstr_all( + "put", + r""" +put(input, index, source, accumulate=False) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.put_`. +`input` corresponds to `self` in :meth:`torch.Tensor.put_`. +""", +) + +add_docstr_all( + "qr", + r""" +qr(some=True) -> (Tensor, Tensor) + +See :func:`torch.qr` +""", +) + +add_docstr_all( + "qscheme", + r""" +qscheme() -> torch.qscheme + +Returns the quantization scheme of a given QTensor. +""", +) + +add_docstr_all( + "quantile", + r""" +quantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor + +See :func:`torch.quantile` +""", +) + +add_docstr_all( + "nanquantile", + r""" +nanquantile(q, dim=None, keepdim=False, *, interpolation='linear') -> Tensor + +See :func:`torch.nanquantile` +""", +) + +add_docstr_all( + "q_scale", + r""" +q_scale() -> float + +Given a Tensor quantized by linear(affine) quantization, +returns the scale of the underlying quantizer(). +""", +) + +add_docstr_all( + "q_zero_point", + r""" +q_zero_point() -> int + +Given a Tensor quantized by linear(affine) quantization, +returns the zero_point of the underlying quantizer(). +""", +) + +add_docstr_all( + "q_per_channel_scales", + r""" +q_per_channel_scales() -> Tensor + +Given a Tensor quantized by linear (affine) per-channel quantization, +returns a Tensor of scales of the underlying quantizer. 
It has the number of +elements that matches the corresponding dimensions (from q_per_channel_axis) of +the tensor. +""", +) + +add_docstr_all( + "q_per_channel_zero_points", + r""" +q_per_channel_zero_points() -> Tensor + +Given a Tensor quantized by linear (affine) per-channel quantization, +returns a tensor of zero_points of the underlying quantizer. It has the number of +elements that matches the corresponding dimensions (from q_per_channel_axis) of +the tensor. +""", +) + +add_docstr_all( + "q_per_channel_axis", + r""" +q_per_channel_axis() -> int + +Given a Tensor quantized by linear (affine) per-channel quantization, +returns the index of dimension on which per-channel quantization is applied. +""", +) + +add_docstr_all( + "random_", + r""" +random_(from=0, to=None, *, generator=None) -> Tensor + +Fills :attr:`self` tensor with numbers sampled from the discrete uniform +distribution over ``[from, to - 1]``. If not specified, the values are usually +only bounded by :attr:`self` tensor's data type. However, for floating point +types, if unspecified, range will be ``[0, 2^mantissa]`` to ensure that every +value is representable. For example, `torch.tensor(1, dtype=torch.double).random_()` +will be uniform in ``[0, 2^53]``. +""", +) + +add_docstr_all( + "rad2deg", + r""" +rad2deg() -> Tensor + +See :func:`torch.rad2deg` +""", +) + +add_docstr_all( + "rad2deg_", + r""" +rad2deg_() -> Tensor + +In-place version of :meth:`~Tensor.rad2deg` +""", +) + +add_docstr_all( + "deg2rad", + r""" +deg2rad() -> Tensor + +See :func:`torch.deg2rad` +""", +) + +add_docstr_all( + "deg2rad_", + r""" +deg2rad_() -> Tensor + +In-place version of :meth:`~Tensor.deg2rad` +""", +) + +add_docstr_all( + "ravel", + r""" +ravel() -> Tensor + +see :func:`torch.ravel` +""", +) + +add_docstr_all( + "reciprocal", + r""" +reciprocal() -> Tensor + +See :func:`torch.reciprocal` +""", +) + +add_docstr_all( + "reciprocal_", + r""" +reciprocal_() -> Tensor + +In-place version of :meth:`~Tensor.reciprocal` +""", +) + +add_docstr_all( + "record_stream", + r""" +record_stream(stream) + +Marks the tensor as having been used by this stream. When the tensor +is deallocated, ensure the tensor memory is not reused for another tensor +until all work queued on :attr:`stream` at the time of deallocation is +complete. + +.. note:: + + The caching allocator is aware of only the stream where a tensor was + allocated. Due to the awareness, it already correctly manages the life + cycle of tensors on only one stream. But if a tensor is used on a stream + different from the stream of origin, the allocator might reuse the memory + unexpectedly. Calling this method lets the allocator know which streams + have used the tensor. + +.. warning:: + + This method is most suitable for use cases where you are providing a + function that created a tensor on a side stream, and want users to be able + to make use of the tensor without having to think carefully about stream + safety when making use of them. These safety guarantees come at some + performance and predictability cost (analogous to the tradeoff between GC + and manual memory management), so if you are in a situation where + you manage the full lifetime of your tensors, you may consider instead + manually managing CUDA events so that calling this method is not necessary. 
+ In particular, when you call this method, on later allocations the + allocator will poll the recorded stream to see if all operations have + completed yet; you can potentially race with side stream computation and + non-deterministically reuse or fail to reuse memory for an allocation. + + You can safely use tensors allocated on side streams without + :meth:`~Tensor.record_stream`; you must manually ensure that + any non-creation stream uses of a tensor are synced back to the creation + stream before you deallocate the tensor. As the CUDA caching allocator + guarantees that the memory will only be reused with the same creation stream, + this is sufficient to ensure that writes to future reallocations of the + memory will be delayed until non-creation stream uses are done. + (Counterintuitively, you may observe that on the CPU side we have already + reallocated the tensor, even though CUDA kernels on the old tensor are + still in progress. This is fine, because CUDA operations on the new + tensor will appropriately wait for the old operations to complete, as they + are all on the same stream.) + + Concretely, this looks like this:: + + with torch.cuda.stream(s0): + x = torch.zeros(N) + + s1.wait_stream(s0) + with torch.cuda.stream(s1): + y = some_comm_op(x) + + ... some compute on s0 ... + + # synchronize creation stream s0 to side stream s1 + # before deallocating x + s0.wait_stream(s1) + del x + + Note that some discretion is required when deciding when to perform + ``s0.wait_stream(s1)``. In particular, if we were to wait immediately + after ``some_comm_op``, there wouldn't be any point in having the side + stream; it would be equivalent to have run ``some_comm_op`` on ``s0``. + Instead, the synchronization must be placed at some appropriate, later + point in time where you expect the side stream ``s1`` to have finished + work. This location is typically identified via profiling, e.g., using + Chrome traces produced + :meth:`torch.autograd.profiler.profile.export_chrome_trace`. If you + place the wait too early, work on s0 will block until ``s1`` has finished, + preventing further overlapping of communication and computation. If you + place the wait too late, you will use more memory than is strictly + necessary (as you are keeping ``x`` live for longer.) For a concrete + example of how this guidance can be applied in practice, see this post: + `FSDP and CUDACachingAllocator + `_. +""", +) + +add_docstr_all( + "remainder", + r""" +remainder(divisor) -> Tensor + +See :func:`torch.remainder` +""", +) + +add_docstr_all( + "remainder_", + r""" +remainder_(divisor) -> Tensor + +In-place version of :meth:`~Tensor.remainder` +""", +) + +add_docstr_all( + "renorm", + r""" +renorm(p, dim, maxnorm) -> Tensor + +See :func:`torch.renorm` +""", +) + +add_docstr_all( + "renorm_", + r""" +renorm_(p, dim, maxnorm) -> Tensor + +In-place version of :meth:`~Tensor.renorm` +""", +) + +add_docstr_all( + "repeat", + r""" +repeat(*sizes) -> Tensor + +Repeats this tensor along the specified dimensions. + +Unlike :meth:`~Tensor.expand`, this function copies the tensor's data. + +.. warning:: + + :meth:`~Tensor.repeat` behaves differently from + `numpy.repeat `_, + but is more similar to + `numpy.tile `_. + For the operator similar to `numpy.repeat`, see :func:`torch.repeat_interleave`. 
+ +Args: + sizes (torch.Size or int...): The number of times to repeat this tensor along each + dimension + +Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat(4, 2) + tensor([[ 1, 2, 3, 1, 2, 3], + [ 1, 2, 3, 1, 2, 3], + [ 1, 2, 3, 1, 2, 3], + [ 1, 2, 3, 1, 2, 3]]) + >>> x.repeat(4, 2, 1).size() + torch.Size([4, 2, 3]) +""", +) + +add_docstr_all( + "repeat_interleave", + r""" +repeat_interleave(repeats, dim=None, *, output_size=None) -> Tensor + +See :func:`torch.repeat_interleave`. +""", +) + +add_docstr_all( + "requires_grad_", + r""" +requires_grad_(requires_grad=True) -> Tensor + +Change if autograd should record operations on this tensor: sets this tensor's +:attr:`requires_grad` attribute in-place. Returns this tensor. + +:func:`requires_grad_`'s main use case is to tell autograd to begin recording +operations on a Tensor ``tensor``. If ``tensor`` has ``requires_grad=False`` +(because it was obtained through a DataLoader, or required preprocessing or +initialization), ``tensor.requires_grad_()`` makes it so that autograd will +begin to record operations on ``tensor``. + +Args: + requires_grad (bool): If autograd should record operations on this tensor. + Default: ``True``. + +Example:: + + >>> # Let's say we want to preprocess some saved weights and use + >>> # the result as new weights. + >>> saved_weights = [0.1, 0.2, 0.3, 0.25] + >>> loaded_weights = torch.tensor(saved_weights) + >>> weights = preprocess(loaded_weights) # some function + >>> weights + tensor([-0.5503, 0.4926, -2.1158, -0.8303]) + + >>> # Now, start to record operations done to weights + >>> weights.requires_grad_() + >>> out = weights.pow(2).sum() + >>> out.backward() + >>> weights.grad + tensor([-1.1007, 0.9853, -4.2316, -1.6606]) + +""", +) + +add_docstr_all( + "reshape", + r""" +reshape(*shape) -> Tensor + +Returns a tensor with the same data and number of elements as :attr:`self` +but with the specified shape. This method returns a view if :attr:`shape` is +compatible with the current shape. See :meth:`torch.Tensor.view` on when it is +possible to return a view. + +See :func:`torch.reshape` + +Args: + shape (tuple of ints or int...): the desired shape + +""", +) + +add_docstr_all( + "reshape_as", + r""" +reshape_as(other) -> Tensor + +Returns this tensor as the same shape as :attr:`other`. +``self.reshape_as(other)`` is equivalent to ``self.reshape(other.sizes())``. +This method returns a view if ``other.sizes()`` is compatible with the current +shape. See :meth:`torch.Tensor.view` on when it is possible to return a view. + +Please see :meth:`reshape` for more information about ``reshape``. + +Args: + other (:class:`torch.Tensor`): The result tensor has the same shape + as :attr:`other`. +""", +) + +add_docstr_all( + "resize_", + r""" +resize_(*sizes, memory_format=torch.contiguous_format) -> Tensor + +Resizes :attr:`self` tensor to the specified size. If the number of elements is +larger than the current storage size, then the underlying storage is resized +to fit the new number of elements. If the number of elements is smaller, the +underlying storage is not changed. Existing elements are preserved but any new +memory is uninitialized. + +.. warning:: + + This is a low-level method. The storage is reinterpreted as C-contiguous, + ignoring the current strides (unless the target size equals the current + size, in which case the tensor is left unchanged). 
For most purposes, you + will instead want to use :meth:`~Tensor.view()`, which checks for + contiguity, or :meth:`~Tensor.reshape()`, which copies data if needed. To + change the size in-place with custom strides, see :meth:`~Tensor.set_()`. + +.. note:: + + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, new elements are initialized to prevent nondeterministic behavior + from using the result as an input to an operation. Floating point and + complex values are set to NaN, and integer values are set to the maximum + value. + +Args: + sizes (torch.Size or int...): the desired size + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + Tensor. Default: ``torch.contiguous_format``. Note that memory format of + :attr:`self` is going to be unaffected if ``self.size()`` matches ``sizes``. + +Example:: + + >>> x = torch.tensor([[1, 2], [3, 4], [5, 6]]) + >>> x.resize_(2, 2) + tensor([[ 1, 2], + [ 3, 4]]) +""", +) + +add_docstr_all( + "resize_as_", + r""" +resize_as_(tensor, memory_format=torch.contiguous_format) -> Tensor + +Resizes the :attr:`self` tensor to be the same size as the specified +:attr:`tensor`. This is equivalent to ``self.resize_(tensor.size())``. + +Args: + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + Tensor. Default: ``torch.contiguous_format``. Note that memory format of + :attr:`self` is going to be unaffected if ``self.size()`` matches ``tensor.size()``. + +""", +) + +add_docstr_all( + "rot90", + r""" +rot90(k, dims) -> Tensor + +See :func:`torch.rot90` +""", +) + +add_docstr_all( + "round", + r""" +round(decimals=0) -> Tensor + +See :func:`torch.round` +""", +) + +add_docstr_all( + "round_", + r""" +round_(decimals=0) -> Tensor + +In-place version of :meth:`~Tensor.round` +""", +) + +add_docstr_all( + "rsqrt", + r""" +rsqrt() -> Tensor + +See :func:`torch.rsqrt` +""", +) + +add_docstr_all( + "rsqrt_", + r""" +rsqrt_() -> Tensor + +In-place version of :meth:`~Tensor.rsqrt` +""", +) + +add_docstr_all( + "scatter_", + r""" +scatter_(dim, index, src, *, reduce=None) -> Tensor + +Writes all values from the tensor :attr:`src` into :attr:`self` at the indices +specified in the :attr:`index` tensor. For each value in :attr:`src`, its output +index is specified by its index in :attr:`src` for ``dimension != dim`` and by +the corresponding value in :attr:`index` for ``dimension = dim``. + +For a 3-D tensor, :attr:`self` is updated as:: + + self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2 + +This is the reverse operation of the manner described in :meth:`~Tensor.gather`. + +:attr:`self`, :attr:`index` and :attr:`src` (if it is a Tensor) should all have +the same number of dimensions. It is also required that +``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that +``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``. +Note that ``index`` and ``src`` do not broadcast. + +Moreover, as for :meth:`~Tensor.gather`, the values of :attr:`index` must be +between ``0`` and ``self.size(dim) - 1`` inclusive. + +.. warning:: + + When indices are not unique, the behavior is non-deterministic (one of the + values from ``src`` will be picked arbitrarily) and the gradient will be + incorrect (it will be propagated to all locations in the source that + correspond to the same index)! + +.. 
note:: + + The backward pass is implemented only for ``src.shape == index.shape``. + +Additionally accepts an optional :attr:`reduce` argument that allows +specification of an optional reduction operation, which is applied to all +values in the tensor :attr:`src` into :attr:`self` at the indices +specified in the :attr:`index`. For each value in :attr:`src`, the reduction +operation is applied to an index in :attr:`self` which is specified by +its index in :attr:`src` for ``dimension != dim`` and by the corresponding +value in :attr:`index` for ``dimension = dim``. + +Given a 3-D tensor and reduction using the multiplication operation, :attr:`self` +is updated as:: + + self[index[i][j][k]][j][k] *= src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] *= src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] *= src[i][j][k] # if dim == 2 + +Reducing with the addition operation is the same as using +:meth:`~torch.Tensor.scatter_add_`. + +.. warning:: + The reduce argument with Tensor ``src`` is deprecated and will be removed in + a future PyTorch release. Please use :meth:`~torch.Tensor.scatter_reduce_` + instead for more reduction options. + +Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + src (Tensor): the source element(s) to scatter. + +Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. + +Example:: + + >>> src = torch.arange(1, 11).reshape((2, 5)) + >>> src + tensor([[ 1, 2, 3, 4, 5], + [ 6, 7, 8, 9, 10]]) + >>> index = torch.tensor([[0, 1, 2, 0]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(0, index, src) + tensor([[1, 0, 0, 4, 0], + [0, 2, 0, 0, 0], + [0, 0, 3, 0, 0]]) + >>> index = torch.tensor([[0, 1, 2], [0, 1, 4]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_(1, index, src) + tensor([[1, 2, 3, 0, 0], + [6, 7, 0, 0, 8], + [0, 0, 0, 0, 0]]) + + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='multiply') + tensor([[2.0000, 2.0000, 2.4600, 2.0000], + [2.0000, 2.0000, 2.0000, 2.4600]]) + >>> torch.full((2, 4), 2.).scatter_(1, torch.tensor([[2], [3]]), + ... 1.23, reduce='add') + tensor([[2.0000, 2.0000, 3.2300, 2.0000], + [2.0000, 2.0000, 2.0000, 3.2300]]) + +.. function:: scatter_(dim, index, value, *, reduce=None) -> Tensor: + :noindex: + +Writes the value from :attr:`value` into :attr:`self` at the indices +specified in the :attr:`index` tensor. This operation is equivalent to the previous version, +with the :attr:`src` tensor filled entirely with :attr:`value`. + +Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter, can be either empty + or of the same dimensionality as ``src``. When empty, the operation + returns ``self`` unchanged. + value (Scalar): the value to scatter. + +Keyword args: + reduce (str, optional): reduction operation to apply, can be either + ``'add'`` or ``'multiply'``. 
+ +Example:: + + >>> index = torch.tensor([[0, 1]]) + >>> value = 2 + >>> torch.zeros(3, 5).scatter_(0, index, value) + tensor([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [0., 0., 0., 0., 0.]]) +""", +) + +add_docstr_all( + "scatter_add_", + r""" +scatter_add_(dim, index, src) -> Tensor + +Adds all values from the tensor :attr:`src` into :attr:`self` at the indices +specified in the :attr:`index` tensor in a similar fashion as +:meth:`~torch.Tensor.scatter_`. For each value in :attr:`src`, it is added to +an index in :attr:`self` which is specified by its index in :attr:`src` +for ``dimension != dim`` and by the corresponding value in :attr:`index` for +``dimension = dim``. + +For a 3-D tensor, :attr:`self` is updated as:: + + self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2 + +:attr:`self`, :attr:`index` and :attr:`src` should have same number of +dimensions. It is also required that ``index.size(d) <= src.size(d)`` for all +dimensions ``d``, and that ``index.size(d) <= self.size(d)`` for all dimensions +``d != dim``. Note that ``index`` and ``src`` do not broadcast. + +Note: + {forward_reproducibility_note} + +.. note:: + + The backward pass is implemented only for ``src.shape == index.shape``. + +Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter and add, can be + either empty or of the same dimensionality as ``src``. When empty, the + operation returns ``self`` unchanged. + src (Tensor): the source elements to scatter and add + +Example:: + + >>> src = torch.ones((2, 5)) + >>> index = torch.tensor([[0, 1, 2, 0, 0]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src) + tensor([[1., 0., 0., 1., 1.], + [0., 1., 0., 0., 0.], + [0., 0., 1., 0., 0.]]) + >>> index = torch.tensor([[0, 1, 2, 0, 0], [0, 1, 2, 2, 2]]) + >>> torch.zeros(3, 5, dtype=src.dtype).scatter_add_(0, index, src) + tensor([[2., 0., 0., 1., 1.], + [0., 2., 0., 0., 0.], + [0., 0., 2., 1., 1.]]) + +""".format( + **reproducibility_notes + ), +) + +add_docstr_all( + "scatter_reduce_", + r""" +scatter_reduce_(dim, index, src, reduce, *, include_self=True) -> Tensor + +Reduces all values from the :attr:`src` tensor to the indices specified in +the :attr:`index` tensor in the :attr:`self` tensor using the applied reduction +defined via the :attr:`reduce` argument (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, +:obj:`"amax"`, :obj:`"amin"`). For each value in :attr:`src`, it is reduced to an +index in :attr:`self` which is specified by its index in :attr:`src` for +``dimension != dim`` and by the corresponding value in :attr:`index` for +``dimension = dim``. If :obj:`include_self="True"`, the values in the :attr:`self` +tensor are included in the reduction. + +:attr:`self`, :attr:`index` and :attr:`src` should all have +the same number of dimensions. It is also required that +``index.size(d) <= src.size(d)`` for all dimensions ``d``, and that +``index.size(d) <= self.size(d)`` for all dimensions ``d != dim``. +Note that ``index`` and ``src`` do not broadcast. + +For a 3-D tensor with :obj:`reduce="sum"` and :obj:`include_self=True` the +output is given as:: + + self[index[i][j][k]][j][k] += src[i][j][k] # if dim == 0 + self[i][index[i][j][k]][k] += src[i][j][k] # if dim == 1 + self[i][j][index[i][j][k]] += src[i][j][k] # if dim == 2 + +Note: + {forward_reproducibility_note} + +.. 
note:: + + The backward pass is implemented only for ``src.shape == index.shape``. + +.. warning:: + + This function is in beta and may change in the near future. + +Args: + dim (int): the axis along which to index + index (LongTensor): the indices of elements to scatter and reduce. + src (Tensor): the source elements to scatter and reduce + reduce (str): the reduction operation to apply for non-unique indices + (:obj:`"sum"`, :obj:`"prod"`, :obj:`"mean"`, :obj:`"amax"`, :obj:`"amin"`) + include_self (bool): whether elements from the :attr:`self` tensor are + included in the reduction + +Example:: + + >>> src = torch.tensor([1., 2., 3., 4., 5., 6.]) + >>> index = torch.tensor([0, 1, 0, 1, 2, 1]) + >>> input = torch.tensor([1., 2., 3., 4.]) + >>> input.scatter_reduce(0, index, src, reduce="sum") + tensor([5., 14., 8., 4.]) + >>> input.scatter_reduce(0, index, src, reduce="sum", include_self=False) + tensor([4., 12., 5., 4.]) + >>> input2 = torch.tensor([5., 4., 3., 2.]) + >>> input2.scatter_reduce(0, index, src, reduce="amax") + tensor([5., 6., 5., 2.]) + >>> input2.scatter_reduce(0, index, src, reduce="amax", include_self=False) + tensor([3., 6., 5., 2.]) + + +""".format( + **reproducibility_notes + ), +) + +add_docstr_all( + "select", + r""" +select(dim, index) -> Tensor + +See :func:`torch.select` +""", +) + +add_docstr_all( + "select_scatter", + r""" +select_scatter(src, dim, index) -> Tensor + +See :func:`torch.select_scatter` +""", +) + +add_docstr_all( + "slice_scatter", + r""" +slice_scatter(src, dim=0, start=None, end=None, step=1) -> Tensor + +See :func:`torch.slice_scatter` +""", +) + +add_docstr_all( + "set_", + r""" +set_(source=None, storage_offset=0, size=None, stride=None) -> Tensor + +Sets the underlying storage, size, and strides. If :attr:`source` is a tensor, +:attr:`self` tensor will share the same storage and have the same size and +strides as :attr:`source`. Changes to elements in one tensor will be reflected +in the other. + +If :attr:`source` is a :class:`~torch.Storage`, the method sets the underlying +storage, offset, size, and stride. + +Args: + source (Tensor or Storage): the tensor or storage to use + storage_offset (int, optional): the offset in the storage + size (torch.Size, optional): the desired size. Defaults to the size of the source. + stride (tuple, optional): the desired stride. Defaults to C-contiguous strides. 
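+
+Example (an illustrative sketch; shows that ``self`` shares storage with ``source`` afterwards)::
+
+    >>> a = torch.zeros(4)
+    >>> b = torch.empty(0)
+    >>> b.set_(a)       # ``b`` now uses ``a``'s storage, size and stride
+    tensor([0., 0., 0., 0.])
+    >>> b.fill_(1.)
+    tensor([1., 1., 1., 1.])
+    >>> a               # ``a`` reflects the write because storage is shared
+    tensor([1., 1., 1., 1.])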
+""", +) + +add_docstr_all( + "sigmoid", + r""" +sigmoid() -> Tensor + +See :func:`torch.sigmoid` +""", +) + +add_docstr_all( + "sigmoid_", + r""" +sigmoid_() -> Tensor + +In-place version of :meth:`~Tensor.sigmoid` +""", +) + +add_docstr_all( + "logit", + r""" +logit() -> Tensor + +See :func:`torch.logit` +""", +) + +add_docstr_all( + "logit_", + r""" +logit_() -> Tensor + +In-place version of :meth:`~Tensor.logit` +""", +) + +add_docstr_all( + "sign", + r""" +sign() -> Tensor + +See :func:`torch.sign` +""", +) + +add_docstr_all( + "sign_", + r""" +sign_() -> Tensor + +In-place version of :meth:`~Tensor.sign` +""", +) + +add_docstr_all( + "signbit", + r""" +signbit() -> Tensor + +See :func:`torch.signbit` +""", +) + +add_docstr_all( + "sgn", + r""" +sgn() -> Tensor + +See :func:`torch.sgn` +""", +) + +add_docstr_all( + "sgn_", + r""" +sgn_() -> Tensor + +In-place version of :meth:`~Tensor.sgn` +""", +) + +add_docstr_all( + "sin", + r""" +sin() -> Tensor + +See :func:`torch.sin` +""", +) + +add_docstr_all( + "sin_", + r""" +sin_() -> Tensor + +In-place version of :meth:`~Tensor.sin` +""", +) + +add_docstr_all( + "sinc", + r""" +sinc() -> Tensor + +See :func:`torch.sinc` +""", +) + +add_docstr_all( + "sinc_", + r""" +sinc_() -> Tensor + +In-place version of :meth:`~Tensor.sinc` +""", +) + +add_docstr_all( + "sinh", + r""" +sinh() -> Tensor + +See :func:`torch.sinh` +""", +) + +add_docstr_all( + "sinh_", + r""" +sinh_() -> Tensor + +In-place version of :meth:`~Tensor.sinh` +""", +) + +add_docstr_all( + "size", + r""" +size(dim=None) -> torch.Size or int + +Returns the size of the :attr:`self` tensor. If ``dim`` is not specified, +the returned value is a :class:`torch.Size`, a subclass of :class:`tuple`. +If ``dim`` is specified, returns an int holding the size of that dimension. + +Args: + dim (int, optional): The dimension for which to retrieve the size. + +Example:: + + >>> t = torch.empty(3, 4, 5) + >>> t.size() + torch.Size([3, 4, 5]) + >>> t.size(dim=1) + 4 + +""", +) + +add_docstr_all( + "shape", + r""" +shape() -> torch.Size + +Returns the size of the :attr:`self` tensor. Alias for :attr:`size`. + +See also :meth:`Tensor.size`. + +Example:: + + >>> t = torch.empty(3, 4, 5) + >>> t.size() + torch.Size([3, 4, 5]) + >>> t.shape + torch.Size([3, 4, 5]) + +""", +) + +add_docstr_all( + "sort", + r""" +sort(dim=-1, descending=False) -> (Tensor, LongTensor) + +See :func:`torch.sort` +""", +) + +add_docstr_all( + "msort", + r""" +msort() -> Tensor + +See :func:`torch.msort` +""", +) + +add_docstr_all( + "argsort", + r""" +argsort(dim=-1, descending=False) -> LongTensor + +See :func:`torch.argsort` +""", +) + +add_docstr_all( + "sparse_dim", + r""" +sparse_dim() -> int + +Return the number of sparse dimensions in a :ref:`sparse tensor ` :attr:`self`. + +.. note:: + Returns ``0`` if :attr:`self` is not a sparse tensor. + +See also :meth:`Tensor.dense_dim` and :ref:`hybrid tensors `. +""", +) + +add_docstr_all( + "sparse_resize_", + r""" +sparse_resize_(size, sparse_dim, dense_dim) -> Tensor + +Resizes :attr:`self` :ref:`sparse tensor ` to the desired +size and the number of sparse and dense dimensions. + +.. note:: + If the number of specified elements in :attr:`self` is zero, then + :attr:`size`, :attr:`sparse_dim`, and :attr:`dense_dim` can be any + size and positive integers such that ``len(size) == sparse_dim + + dense_dim``. 
+ + If :attr:`self` specifies one or more elements, however, then each + dimension in :attr:`size` must not be smaller than the corresponding + dimension of :attr:`self`, :attr:`sparse_dim` must equal the number + of sparse dimensions in :attr:`self`, and :attr:`dense_dim` must + equal the number of dense dimensions in :attr:`self`. + +.. warning:: + Throws an error if :attr:`self` is not a sparse tensor. + +Args: + size (torch.Size): the desired size. If :attr:`self` is non-empty + sparse tensor, the desired size cannot be smaller than the + original size. + sparse_dim (int): the number of sparse dimensions + dense_dim (int): the number of dense dimensions +""", +) + +add_docstr_all( + "sparse_resize_and_clear_", + r""" +sparse_resize_and_clear_(size, sparse_dim, dense_dim) -> Tensor + +Removes all specified elements from a :ref:`sparse tensor +` :attr:`self` and resizes :attr:`self` to the desired +size and the number of sparse and dense dimensions. + +.. warning: + Throws an error if :attr:`self` is not a sparse tensor. + +Args: + size (torch.Size): the desired size. + sparse_dim (int): the number of sparse dimensions + dense_dim (int): the number of dense dimensions +""", +) + +add_docstr_all( + "sqrt", + r""" +sqrt() -> Tensor + +See :func:`torch.sqrt` +""", +) + +add_docstr_all( + "sqrt_", + r""" +sqrt_() -> Tensor + +In-place version of :meth:`~Tensor.sqrt` +""", +) + +add_docstr_all( + "square", + r""" +square() -> Tensor + +See :func:`torch.square` +""", +) + +add_docstr_all( + "square_", + r""" +square_() -> Tensor + +In-place version of :meth:`~Tensor.square` +""", +) + +add_docstr_all( + "squeeze", + r""" +squeeze(dim=None) -> Tensor + +See :func:`torch.squeeze` +""", +) + +add_docstr_all( + "squeeze_", + r""" +squeeze_(dim=None) -> Tensor + +In-place version of :meth:`~Tensor.squeeze` +""", +) + +add_docstr_all( + "std", + r""" +std(dim=None, *, correction=1, keepdim=False) -> Tensor + +See :func:`torch.std` +""", +) + +add_docstr_all( + "storage_offset", + r""" +storage_offset() -> int + +Returns :attr:`self` tensor's offset in the underlying storage in terms of +number of storage elements (not bytes). + +Example:: + + >>> x = torch.tensor([1, 2, 3, 4, 5]) + >>> x.storage_offset() + 0 + >>> x[3:].storage_offset() + 3 + +""", +) + +add_docstr_all( + "untyped_storage", + r""" +untyped_storage() -> torch.UntypedStorage + +Returns the underlying :class:`UntypedStorage`. +""", +) + +add_docstr_all( + "stride", + r""" +stride(dim) -> tuple or int + +Returns the stride of :attr:`self` tensor. + +Stride is the jump necessary to go from one element to the next one in the +specified dimension :attr:`dim`. A tuple of all strides is returned when no +argument is passed in. Otherwise, an integer value is returned as the stride in +the particular dimension :attr:`dim`. + +Args: + dim (int, optional): the desired dimension in which stride is required + +Example:: + + >>> x = torch.tensor([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]) + >>> x.stride() + (5, 1) + >>> x.stride(0) + 5 + >>> x.stride(-1) + 1 + +""", +) + +add_docstr_all( + "sub", + r""" +sub(other, *, alpha=1) -> Tensor + +See :func:`torch.sub`. +""", +) + +add_docstr_all( + "sub_", + r""" +sub_(other, *, alpha=1) -> Tensor + +In-place version of :meth:`~Tensor.sub` +""", +) + +add_docstr_all( + "subtract", + r""" +subtract(other, *, alpha=1) -> Tensor + +See :func:`torch.subtract`. +""", +) + +add_docstr_all( + "subtract_", + r""" +subtract_(other, *, alpha=1) -> Tensor + +In-place version of :meth:`~Tensor.subtract`. 
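+
+Example (an illustrative sketch)::
+
+    >>> a = torch.tensor([10., 20.])
+    >>> a.subtract_(torch.tensor([1., 2.]), alpha=2)   # a - 2 * other, in place
+    tensor([ 8., 16.])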
+""", +) + +add_docstr_all( + "sum", + r""" +sum(dim=None, keepdim=False, dtype=None) -> Tensor + +See :func:`torch.sum` +""", +) + +add_docstr_all( + "nansum", + r""" +nansum(dim=None, keepdim=False, dtype=None) -> Tensor + +See :func:`torch.nansum` +""", +) + +add_docstr_all( + "svd", + r""" +svd(some=True, compute_uv=True) -> (Tensor, Tensor, Tensor) + +See :func:`torch.svd` +""", +) + +add_docstr_all( + "swapdims", + r""" +swapdims(dim0, dim1) -> Tensor + +See :func:`torch.swapdims` +""", +) + +add_docstr_all( + "swapdims_", + r""" +swapdims_(dim0, dim1) -> Tensor + +In-place version of :meth:`~Tensor.swapdims` +""", +) + +add_docstr_all( + "swapaxes", + r""" +swapaxes(axis0, axis1) -> Tensor + +See :func:`torch.swapaxes` +""", +) + +add_docstr_all( + "swapaxes_", + r""" +swapaxes_(axis0, axis1) -> Tensor + +In-place version of :meth:`~Tensor.swapaxes` +""", +) + +add_docstr_all( + "t", + r""" +t() -> Tensor + +See :func:`torch.t` +""", +) + +add_docstr_all( + "t_", + r""" +t_() -> Tensor + +In-place version of :meth:`~Tensor.t` +""", +) + +add_docstr_all( + "tile", + r""" +tile(dims) -> Tensor + +See :func:`torch.tile` +""", +) + +add_docstr_all( + "to", + r""" +to(*args, **kwargs) -> Tensor + +Performs Tensor dtype and/or device conversion. A :class:`torch.dtype` and :class:`torch.device` are +inferred from the arguments of ``self.to(*args, **kwargs)``. + +.. note:: + + If the ``self`` Tensor already + has the correct :class:`torch.dtype` and :class:`torch.device`, then ``self`` is returned. + Otherwise, the returned tensor is a copy of ``self`` with the desired + :class:`torch.dtype` and :class:`torch.device`. + +Here are the ways to call ``to``: + +.. method:: to(dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor + :noindex: + + Returns a Tensor with the specified :attr:`dtype` + + Args: + {memory_format} + +.. method:: to(device=None, dtype=None, non_blocking=False, copy=False, memory_format=torch.preserve_format) -> Tensor + :noindex: + + Returns a Tensor with the specified :attr:`device` and (optional) + :attr:`dtype`. If :attr:`dtype` is ``None`` it is inferred to be ``self.dtype``. + When :attr:`non_blocking`, tries to convert asynchronously with respect to + the host if possible, e.g., converting a CPU Tensor with pinned memory to a + CUDA Tensor. + When :attr:`copy` is set, a new Tensor is created even when the Tensor + already matches the desired conversion. + + Args: + {memory_format} + +.. method:: to(other, non_blocking=False, copy=False) -> Tensor + :noindex: + + Returns a Tensor with same :class:`torch.dtype` and :class:`torch.device` as + the Tensor :attr:`other`. When :attr:`non_blocking`, tries to convert + asynchronously with respect to the host if possible, e.g., converting a CPU + Tensor with pinned memory to a CUDA Tensor. + When :attr:`copy` is set, a new Tensor is created even when the Tensor + already matches the desired conversion. 
+ +Example:: + + >>> tensor = torch.randn(2, 2) # Initially dtype=float32, device=cpu + >>> tensor.to(torch.float64) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64) + + >>> cuda0 = torch.device('cuda:0') + >>> tensor.to(cuda0) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], device='cuda:0') + + >>> tensor.to(cuda0, dtype=torch.float64) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0') + + >>> other = torch.randn((), dtype=torch.float64, device=cuda0) + >>> tensor.to(other, non_blocking=True) + tensor([[-0.5044, 0.0005], + [ 0.3310, -0.0584]], dtype=torch.float64, device='cuda:0') +""".format( + **common_args + ), +) + +add_docstr_all( + "byte", + r""" +byte(memory_format=torch.preserve_format) -> Tensor + +``self.byte()`` is equivalent to ``self.to(torch.uint8)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "bool", + r""" +bool(memory_format=torch.preserve_format) -> Tensor + +``self.bool()`` is equivalent to ``self.to(torch.bool)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "char", + r""" +char(memory_format=torch.preserve_format) -> Tensor + +``self.char()`` is equivalent to ``self.to(torch.int8)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "bfloat16", + r""" +bfloat16(memory_format=torch.preserve_format) -> Tensor +``self.bfloat16()`` is equivalent to ``self.to(torch.bfloat16)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "double", + r""" +double(memory_format=torch.preserve_format) -> Tensor + +``self.double()`` is equivalent to ``self.to(torch.float64)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "float", + r""" +float(memory_format=torch.preserve_format) -> Tensor + +``self.float()`` is equivalent to ``self.to(torch.float32)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "cdouble", + r""" +cdouble(memory_format=torch.preserve_format) -> Tensor + +``self.cdouble()`` is equivalent to ``self.to(torch.complex128)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "cfloat", + r""" +cfloat(memory_format=torch.preserve_format) -> Tensor + +``self.cfloat()`` is equivalent to ``self.to(torch.complex64)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "chalf", + r""" +chalf(memory_format=torch.preserve_format) -> Tensor + +``self.chalf()`` is equivalent to ``self.to(torch.complex32)``. See :func:`to`. + +Args: + {memory_format} + """.format( + **common_args + ), +) + +add_docstr_all( + "half", + r""" +half(memory_format=torch.preserve_format) -> Tensor + +``self.half()`` is equivalent to ``self.to(torch.float16)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "int", + r""" +int(memory_format=torch.preserve_format) -> Tensor + +``self.int()`` is equivalent to ``self.to(torch.int32)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "int_repr", + r""" +int_repr() -> Tensor + +Given a quantized Tensor, +``self.int_repr()`` returns a CPU Tensor with uint8_t as data type that stores the +underlying uint8_t values of the given Tensor. 
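+
+Example (an illustrative sketch, assuming a per-tensor affine quantized tensor)::
+
+    >>> t = torch.tensor([-1.0, 0.0, 1.0, 2.0])
+    >>> q = torch.quantize_per_tensor(t, scale=0.1, zero_point=10, dtype=torch.quint8)
+    >>> q.int_repr()    # the raw uint8 values: round(t / scale) + zero_point
+    tensor([ 0, 10, 20, 30], dtype=torch.uint8)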
+""", +) + + +add_docstr_all( + "long", + r""" +long(memory_format=torch.preserve_format) -> Tensor + +``self.long()`` is equivalent to ``self.to(torch.int64)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "short", + r""" +short(memory_format=torch.preserve_format) -> Tensor + +``self.short()`` is equivalent to ``self.to(torch.int16)``. See :func:`to`. + +Args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr_all( + "take", + r""" +take(indices) -> Tensor + +See :func:`torch.take` +""", +) + +add_docstr_all( + "take_along_dim", + r""" +take_along_dim(indices, dim) -> Tensor + +See :func:`torch.take_along_dim` +""", +) + +add_docstr_all( + "tan", + r""" +tan() -> Tensor + +See :func:`torch.tan` +""", +) + +add_docstr_all( + "tan_", + r""" +tan_() -> Tensor + +In-place version of :meth:`~Tensor.tan` +""", +) + +add_docstr_all( + "tanh", + r""" +tanh() -> Tensor + +See :func:`torch.tanh` +""", +) + +add_docstr_all( + "softmax", + r""" +softmax(dim) -> Tensor + +Alias for :func:`torch.nn.functional.softmax`. +""", +) + +add_docstr_all( + "tanh_", + r""" +tanh_() -> Tensor + +In-place version of :meth:`~Tensor.tanh` +""", +) + +add_docstr_all( + "tolist", + r""" +tolist() -> list or number + +Returns the tensor as a (nested) list. For scalars, a standard +Python number is returned, just like with :meth:`~Tensor.item`. +Tensors are automatically moved to the CPU first if necessary. + +This operation is not differentiable. + +Examples:: + + >>> a = torch.randn(2, 2) + >>> a.tolist() + [[0.012766935862600803, 0.5415473580360413], + [-0.08909505605697632, 0.7729271650314331]] + >>> a[0,0].tolist() + 0.012766935862600803 +""", +) + +add_docstr_all( + "topk", + r""" +topk(k, dim=None, largest=True, sorted=True) -> (Tensor, LongTensor) + +See :func:`torch.topk` +""", +) + +add_docstr_all( + "to_dense", + r""" +to_dense(dtype=None, *, masked_grad=True) -> Tensor + +Creates a strided copy of :attr:`self` if :attr:`self` is not a strided tensor, otherwise returns :attr:`self`. + +Keyword args: + {dtype} + masked_grad (bool, optional): If set to ``True`` (default) and + :attr:`self` has a sparse layout then the backward of + :meth:`to_dense` returns ``grad.sparse_mask(self)``. + +Example:: + + >>> s = torch.sparse_coo_tensor( + ... torch.tensor([[1, 1], + ... [0, 2]]), + ... torch.tensor([9, 10]), + ... size=(3, 3)) + >>> s.to_dense() + tensor([[ 0, 0, 0], + [ 9, 0, 10], + [ 0, 0, 0]]) +""", +) + +add_docstr_all( + "to_sparse", + r""" +to_sparse(sparseDims) -> Tensor + +Returns a sparse copy of the tensor. PyTorch supports sparse tensors in +:ref:`coordinate format `. + +Args: + sparseDims (int, optional): the number of sparse dimensions to include in the new sparse tensor + +Example:: + + >>> d = torch.tensor([[0, 0, 0], [9, 0, 10], [0, 0, 0]]) + >>> d + tensor([[ 0, 0, 0], + [ 9, 0, 10], + [ 0, 0, 0]]) + >>> d.to_sparse() + tensor(indices=tensor([[1, 1], + [0, 2]]), + values=tensor([ 9, 10]), + size=(3, 3), nnz=2, layout=torch.sparse_coo) + >>> d.to_sparse(1) + tensor(indices=tensor([[1]]), + values=tensor([[ 9, 0, 10]]), + size=(3, 3), nnz=1, layout=torch.sparse_coo) + +.. method:: to_sparse(*, layout=None, blocksize=None, dense_dim=None) -> Tensor + :noindex: + +Returns a sparse tensor with the specified layout and blocksize. 
If +the :attr:`self` is strided, the number of dense dimensions could be +specified, and a hybrid sparse tensor will be created, with +`dense_dim` dense dimensions and `self.dim() - 2 - dense_dim` batch +dimension. + +.. note:: If the :attr:`self` layout and blocksize parameters match + with the specified layout and blocksize, return + :attr:`self`. Otherwise, return a sparse tensor copy of + :attr:`self`. + +Args: + + layout (:class:`torch.layout`, optional): The desired sparse + layout. One of ``torch.sparse_coo``, ``torch.sparse_csr``, + ``torch.sparse_csc``, ``torch.sparse_bsr``, or + ``torch.sparse_bsc``. Default: if ``None``, + ``torch.sparse_coo``. + + blocksize (list, tuple, :class:`torch.Size`, optional): Block size + of the resulting BSR or BSC tensor. For other layouts, + specifying the block size that is not ``None`` will result in a + RuntimeError exception. A block size must be a tuple of length + two such that its items evenly divide the two sparse dimensions. + + dense_dim (int, optional): Number of dense dimensions of the + resulting CSR, CSC, BSR or BSC tensor. This argument should be + used only if :attr:`self` is a strided tensor, and must be a + value between 0 and dimension of :attr:`self` tensor minus two. + +Example:: + + >>> x = torch.tensor([[1, 0], [0, 0], [2, 3]]) + >>> x.to_sparse(layout=torch.sparse_coo) + tensor(indices=tensor([[0, 2, 2], + [0, 0, 1]]), + values=tensor([1, 2, 3]), + size=(3, 2), nnz=3, layout=torch.sparse_coo) + >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(1, 2)) + tensor(crow_indices=tensor([0, 1, 1, 2]), + col_indices=tensor([0, 0]), + values=tensor([[[1, 0]], + [[2, 3]]]), size=(3, 2), nnz=2, layout=torch.sparse_bsr) + >>> x.to_sparse(layout=torch.sparse_bsr, blocksize=(2, 1)) + RuntimeError: Tensor size(-2) 3 needs to be divisible by blocksize[0] 2 + >>> x.to_sparse(layout=torch.sparse_csr, blocksize=(3, 1)) + RuntimeError: to_sparse for Strided to SparseCsr conversion does not use specified blocksize + + >>> x = torch.tensor([[[1], [0]], [[0], [0]], [[2], [3]]]) + >>> x.to_sparse(layout=torch.sparse_csr, dense_dim=1) + tensor(crow_indices=tensor([0, 1, 1, 3]), + col_indices=tensor([0, 0, 1]), + values=tensor([[1], + [2], + [3]]), size=(3, 2, 1), nnz=3, layout=torch.sparse_csr) + +""", +) + +add_docstr_all( + "to_sparse_csr", + r""" +to_sparse_csr(dense_dim=None) -> Tensor + +Convert a tensor to compressed row storage format (CSR). Except for +strided tensors, only works with 2D tensors. If the :attr:`self` is +strided, then the number of dense dimensions could be specified, and a +hybrid CSR tensor will be created, with `dense_dim` dense dimensions +and `self.dim() - 2 - dense_dim` batch dimension. + +Args: + + dense_dim (int, optional): Number of dense dimensions of the + resulting CSR tensor. This argument should be used only if + :attr:`self` is a strided tensor, and must be a value between 0 + and dimension of :attr:`self` tensor minus two. + +Example:: + + >>> dense = torch.randn(5, 5) + >>> sparse = dense.to_sparse_csr() + >>> sparse._nnz() + 25 + + >>> dense = torch.zeros(3, 3, 1, 1) + >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1 + >>> dense.to_sparse_csr(dense_dim=2) + tensor(crow_indices=tensor([0, 1, 2, 3]), + col_indices=tensor([0, 2, 1]), + values=tensor([[[1.]], + + [[1.]], + + [[1.]]]), size=(3, 3, 1, 1), nnz=3, + layout=torch.sparse_csr) + +""", +) + +add_docstr_all( + "to_sparse_csc", + r""" +to_sparse_csc() -> Tensor + +Convert a tensor to compressed column storage (CSC) format. 
Except +for strided tensors, only works with 2D tensors. If the :attr:`self` +is strided, then the number of dense dimensions could be specified, +and a hybrid CSC tensor will be created, with `dense_dim` dense +dimensions and `self.dim() - 2 - dense_dim` batch dimension. + +Args: + + dense_dim (int, optional): Number of dense dimensions of the + resulting CSC tensor. This argument should be used only if + :attr:`self` is a strided tensor, and must be a value between 0 + and dimension of :attr:`self` tensor minus two. + +Example:: + + >>> dense = torch.randn(5, 5) + >>> sparse = dense.to_sparse_csc() + >>> sparse._nnz() + 25 + + >>> dense = torch.zeros(3, 3, 1, 1) + >>> dense[0, 0] = dense[1, 2] = dense[2, 1] = 1 + >>> dense.to_sparse_csc(dense_dim=2) + tensor(ccol_indices=tensor([0, 1, 2, 3]), + row_indices=tensor([0, 2, 1]), + values=tensor([[[1.]], + + [[1.]], + + [[1.]]]), size=(3, 3, 1, 1), nnz=3, + layout=torch.sparse_csc) + +""", +) + +add_docstr_all( + "to_sparse_bsr", + r""" +to_sparse_bsr(blocksize, dense_dim) -> Tensor + +Convert a tensor to a block sparse row (BSR) storage format of given +blocksize. If the :attr:`self` is strided, then the number of dense +dimensions could be specified, and a hybrid BSR tensor will be +created, with `dense_dim` dense dimensions and `self.dim() - 2 - +dense_dim` batch dimension. + +Args: + + blocksize (list, tuple, :class:`torch.Size`, optional): Block size + of the resulting BSR tensor. A block size must be a tuple of + length two such that its items evenly divide the two sparse + dimensions. + + dense_dim (int, optional): Number of dense dimensions of the + resulting BSR tensor. This argument should be used only if + :attr:`self` is a strided tensor, and must be a value between 0 + and dimension of :attr:`self` tensor minus two. + +Example:: + + >>> dense = torch.randn(10, 10) + >>> sparse = dense.to_sparse_csr() + >>> sparse_bsr = sparse.to_sparse_bsr((5, 5)) + >>> sparse_bsr.col_indices() + tensor([0, 1, 0, 1]) + + >>> dense = torch.zeros(4, 3, 1) + >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1 + >>> dense.to_sparse_bsr((2, 1), 1) + tensor(crow_indices=tensor([0, 2, 3]), + col_indices=tensor([0, 2, 1]), + values=tensor([[[[1.]], + + [[1.]]], + + + [[[1.]], + + [[1.]]], + + + [[[1.]], + + [[1.]]]]), size=(4, 3, 1), nnz=3, + layout=torch.sparse_bsr) + +""", +) + +add_docstr_all( + "to_sparse_bsc", + r""" +to_sparse_bsc(blocksize, dense_dim) -> Tensor + +Convert a tensor to a block sparse column (BSC) storage format of +given blocksize. If the :attr:`self` is strided, then the number of +dense dimensions could be specified, and a hybrid BSC tensor will be +created, with `dense_dim` dense dimensions and `self.dim() - 2 - +dense_dim` batch dimension. + +Args: + + blocksize (list, tuple, :class:`torch.Size`, optional): Block size + of the resulting BSC tensor. A block size must be a tuple of + length two such that its items evenly divide the two sparse + dimensions. + + dense_dim (int, optional): Number of dense dimensions of the + resulting BSC tensor. This argument should be used only if + :attr:`self` is a strided tensor, and must be a value between 0 + and dimension of :attr:`self` tensor minus two. 
+ +Example:: + + >>> dense = torch.randn(10, 10) + >>> sparse = dense.to_sparse_csr() + >>> sparse_bsc = sparse.to_sparse_bsc((5, 5)) + >>> sparse_bsc.row_indices() + tensor([0, 1, 0, 1]) + + >>> dense = torch.zeros(4, 3, 1) + >>> dense[0:2, 0] = dense[0:2, 2] = dense[2:4, 1] = 1 + >>> dense.to_sparse_bsc((2, 1), 1) + tensor(ccol_indices=tensor([0, 1, 2, 3]), + row_indices=tensor([0, 1, 0]), + values=tensor([[[[1.]], + + [[1.]]], + + + [[[1.]], + + [[1.]]], + + + [[[1.]], + + [[1.]]]]), size=(4, 3, 1), nnz=3, + layout=torch.sparse_bsc) + +""", +) + +add_docstr_all( + "to_mkldnn", + r""" +to_mkldnn() -> Tensor +Returns a copy of the tensor in ``torch.mkldnn`` layout. + +""", +) + +add_docstr_all( + "trace", + r""" +trace() -> Tensor + +See :func:`torch.trace` +""", +) + +add_docstr_all( + "transpose", + r""" +transpose(dim0, dim1) -> Tensor + +See :func:`torch.transpose` +""", +) + +add_docstr_all( + "transpose_", + r""" +transpose_(dim0, dim1) -> Tensor + +In-place version of :meth:`~Tensor.transpose` +""", +) + +add_docstr_all( + "triangular_solve", + r""" +triangular_solve(A, upper=True, transpose=False, unitriangular=False) -> (Tensor, Tensor) + +See :func:`torch.triangular_solve` +""", +) + +add_docstr_all( + "tril", + r""" +tril(diagonal=0) -> Tensor + +See :func:`torch.tril` +""", +) + +add_docstr_all( + "tril_", + r""" +tril_(diagonal=0) -> Tensor + +In-place version of :meth:`~Tensor.tril` +""", +) + +add_docstr_all( + "triu", + r""" +triu(diagonal=0) -> Tensor + +See :func:`torch.triu` +""", +) + +add_docstr_all( + "triu_", + r""" +triu_(diagonal=0) -> Tensor + +In-place version of :meth:`~Tensor.triu` +""", +) + +add_docstr_all( + "true_divide", + r""" +true_divide(value) -> Tensor + +See :func:`torch.true_divide` +""", +) + +add_docstr_all( + "true_divide_", + r""" +true_divide_(value) -> Tensor + +In-place version of :meth:`~Tensor.true_divide_` +""", +) + +add_docstr_all( + "trunc", + r""" +trunc() -> Tensor + +See :func:`torch.trunc` +""", +) + +add_docstr_all( + "fix", + r""" +fix() -> Tensor + +See :func:`torch.fix`. +""", +) + +add_docstr_all( + "trunc_", + r""" +trunc_() -> Tensor + +In-place version of :meth:`~Tensor.trunc` +""", +) + +add_docstr_all( + "fix_", + r""" +fix_() -> Tensor + +In-place version of :meth:`~Tensor.fix` +""", +) + +add_docstr_all( + "type", + r""" +type(dtype=None, non_blocking=False, **kwargs) -> str or Tensor +Returns the type if `dtype` is not provided, else casts this object to +the specified type. + +If this is already of the correct type, no copy is performed and the +original object is returned. + +Args: + dtype (dtype or string): The desired type + non_blocking (bool): If ``True``, and the source is in pinned memory + and destination is on the GPU or vice versa, the copy is performed + asynchronously with respect to the host. Otherwise, the argument + has no effect. + **kwargs: For compatibility, may contain the key ``async`` in place of + the ``non_blocking`` argument. The ``async`` arg is deprecated. +""", +) + +add_docstr_all( + "type_as", + r""" +type_as(tensor) -> Tensor + +Returns this tensor cast to the type of the given tensor. + +This is a no-op if the tensor is already of the correct type. 
This is +equivalent to ``self.type(tensor.type())`` + +Args: + tensor (Tensor): the tensor which has the desired type +""", +) + +add_docstr_all( + "unfold", + r""" +unfold(dimension, size, step) -> Tensor + +Returns a view of the original tensor which contains all slices of size :attr:`size` from +:attr:`self` tensor in the dimension :attr:`dimension`. + +Step between two slices is given by :attr:`step`. + +If `sizedim` is the size of dimension :attr:`dimension` for :attr:`self`, the size of +dimension :attr:`dimension` in the returned tensor will be +`(sizedim - size) / step + 1`. + +An additional dimension of size :attr:`size` is appended in the returned tensor. + +Args: + dimension (int): dimension in which unfolding happens + size (int): the size of each slice that is unfolded + step (int): the step between each slice + +Example:: + + >>> x = torch.arange(1., 8) + >>> x + tensor([ 1., 2., 3., 4., 5., 6., 7.]) + >>> x.unfold(0, 2, 1) + tensor([[ 1., 2.], + [ 2., 3.], + [ 3., 4.], + [ 4., 5.], + [ 5., 6.], + [ 6., 7.]]) + >>> x.unfold(0, 2, 2) + tensor([[ 1., 2.], + [ 3., 4.], + [ 5., 6.]]) +""", +) + +add_docstr_all( + "uniform_", + r""" +uniform_(from=0, to=1, *, generator=None) -> Tensor + +Fills :attr:`self` tensor with numbers sampled from the continuous uniform +distribution: + +.. math:: + f(x) = \dfrac{1}{\text{to} - \text{from}} +""", +) + +add_docstr_all( + "unsqueeze", + r""" +unsqueeze(dim) -> Tensor + +See :func:`torch.unsqueeze` +""", +) + +add_docstr_all( + "unsqueeze_", + r""" +unsqueeze_(dim) -> Tensor + +In-place version of :meth:`~Tensor.unsqueeze` +""", +) + +add_docstr_all( + "var", + r""" +var(dim=None, *, correction=1, keepdim=False) -> Tensor + +See :func:`torch.var` +""", +) + +add_docstr_all( + "vdot", + r""" +vdot(other) -> Tensor + +See :func:`torch.vdot` +""", +) + +add_docstr_all( + "view", + r""" +view(*shape) -> Tensor + +Returns a new tensor with the same data as the :attr:`self` tensor but of a +different :attr:`shape`. + +The returned tensor shares the same data and must have the same number +of elements, but may have a different size. For a tensor to be viewed, the new +view size must be compatible with its original size and stride, i.e., each new +view dimension must either be a subspace of an original dimension, or only span +across original dimensions :math:`d, d+1, \dots, d+k` that satisfy the following +contiguity-like condition that :math:`\forall i = d, \dots, d+k-1`, + +.. math:: + + \text{stride}[i] = \text{stride}[i+1] \times \text{size}[i+1] + +Otherwise, it will not be possible to view :attr:`self` tensor as :attr:`shape` +without copying it (e.g., via :meth:`contiguous`). When it is unclear whether a +:meth:`view` can be performed, it is advisable to use :meth:`reshape`, which +returns a view if the shapes are compatible, and copies (equivalent to calling +:meth:`contiguous`) otherwise. + +Args: + shape (torch.Size or int...): the desired size + +Example:: + + >>> x = torch.randn(4, 4) + >>> x.size() + torch.Size([4, 4]) + >>> y = x.view(16) + >>> y.size() + torch.Size([16]) + >>> z = x.view(-1, 8) # the size -1 is inferred from other dimensions + >>> z.size() + torch.Size([2, 8]) + + >>> a = torch.randn(1, 2, 3, 4) + >>> a.size() + torch.Size([1, 2, 3, 4]) + >>> b = a.transpose(1, 2) # Swaps 2nd and 3rd dimension + >>> b.size() + torch.Size([1, 3, 2, 4]) + >>> c = a.view(1, 3, 2, 4) # Does not change tensor layout in memory + >>> c.size() + torch.Size([1, 3, 2, 4]) + >>> torch.equal(b, c) + False + + +.. 
method:: view(dtype) -> Tensor + :noindex: + +Returns a new tensor with the same data as the :attr:`self` tensor but of a +different :attr:`dtype`. + +If the element size of :attr:`dtype` is different than that of ``self.dtype``, +then the size of the last dimension of the output will be scaled +proportionally. For instance, if :attr:`dtype` element size is twice that of +``self.dtype``, then each pair of elements in the last dimension of +:attr:`self` will be combined, and the size of the last dimension of the output +will be half that of :attr:`self`. If :attr:`dtype` element size is half that +of ``self.dtype``, then each element in the last dimension of :attr:`self` will +be split in two, and the size of the last dimension of the output will be +double that of :attr:`self`. For this to be possible, the following conditions +must be true: + + * ``self.dim()`` must be greater than 0. + * ``self.stride(-1)`` must be 1. + +Additionally, if the element size of :attr:`dtype` is greater than that of +``self.dtype``, the following conditions must be true as well: + + * ``self.size(-1)`` must be divisible by the ratio between the element + sizes of the dtypes. + * ``self.storage_offset()`` must be divisible by the ratio between the + element sizes of the dtypes. + * The strides of all dimensions, except the last dimension, must be + divisible by the ratio between the element sizes of the dtypes. + +If any of the above conditions are not met, an error is thrown. + +.. warning:: + + This overload is not supported by TorchScript, and using it in a Torchscript + program will cause undefined behavior. + + +Args: + dtype (:class:`torch.dtype`): the desired dtype + +Example:: + + >>> x = torch.randn(4, 4) + >>> x + tensor([[ 0.9482, -0.0310, 1.4999, -0.5316], + [-0.1520, 0.7472, 0.5617, -0.8649], + [-2.4724, -0.0334, -0.2976, -0.8499], + [-0.2109, 1.9913, -0.9607, -0.6123]]) + >>> x.dtype + torch.float32 + + >>> y = x.view(torch.int32) + >>> y + tensor([[ 1064483442, -1124191867, 1069546515, -1089989247], + [-1105482831, 1061112040, 1057999968, -1084397505], + [-1071760287, -1123489973, -1097310419, -1084649136], + [-1101533110, 1073668768, -1082790149, -1088634448]], + dtype=torch.int32) + >>> y[0, 0] = 1000000000 + >>> x + tensor([[ 0.0047, -0.0310, 1.4999, -0.5316], + [-0.1520, 0.7472, 0.5617, -0.8649], + [-2.4724, -0.0334, -0.2976, -0.8499], + [-0.2109, 1.9913, -0.9607, -0.6123]]) + + >>> x.view(torch.cfloat) + tensor([[ 0.0047-0.0310j, 1.4999-0.5316j], + [-0.1520+0.7472j, 0.5617-0.8649j], + [-2.4724-0.0334j, -0.2976-0.8499j], + [-0.2109+1.9913j, -0.9607-0.6123j]]) + >>> x.view(torch.cfloat).size() + torch.Size([4, 2]) + + >>> x.view(torch.uint8) + tensor([[ 0, 202, 154, 59, 182, 243, 253, 188, 185, 252, 191, 63, 240, 22, + 8, 191], + [227, 165, 27, 190, 128, 72, 63, 63, 146, 203, 15, 63, 22, 106, + 93, 191], + [205, 59, 30, 192, 112, 206, 8, 189, 7, 95, 152, 190, 12, 147, + 89, 191], + [ 43, 246, 87, 190, 235, 226, 254, 63, 111, 240, 117, 191, 177, 191, + 28, 191]], dtype=torch.uint8) + >>> x.view(torch.uint8).size() + torch.Size([4, 16]) +""", +) + +add_docstr_all( + "view_as", + r""" +view_as(other) -> Tensor + +View this tensor as the same size as :attr:`other`. +``self.view_as(other)`` is equivalent to ``self.view(other.size())``. + +Please see :meth:`~Tensor.view` for more information about ``view``. + +Args: + other (:class:`torch.Tensor`): The result tensor has the same size + as :attr:`other`. 
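+
+Example (a minimal illustrative sketch; the shapes below are chosen only for
+demonstration)::
+
+    >>> x = torch.randn(2, 3)
+    >>> y = torch.arange(6)
+    >>> y.view_as(x).size()
+    torch.Size([2, 3])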
+""", +) + +add_docstr_all( + "expand", + r""" +expand(*sizes) -> Tensor + +Returns a new view of the :attr:`self` tensor with singleton dimensions expanded +to a larger size. + +Passing -1 as the size for a dimension means not changing the size of +that dimension. + +Tensor can be also expanded to a larger number of dimensions, and the +new ones will be appended at the front. For the new dimensions, the +size cannot be set to -1. + +Expanding a tensor does not allocate new memory, but only creates a +new view on the existing tensor where a dimension of size one is +expanded to a larger size by setting the ``stride`` to 0. Any dimension +of size 1 can be expanded to an arbitrary value without allocating new +memory. + +Args: + *sizes (torch.Size or int...): the desired expanded size + +.. warning:: + + More than one element of an expanded tensor may refer to a single + memory location. As a result, in-place operations (especially ones that + are vectorized) may result in incorrect behavior. If you need to write + to the tensors, please clone them first. + +Example:: + + >>> x = torch.tensor([[1], [2], [3]]) + >>> x.size() + torch.Size([3, 1]) + >>> x.expand(3, 4) + tensor([[ 1, 1, 1, 1], + [ 2, 2, 2, 2], + [ 3, 3, 3, 3]]) + >>> x.expand(-1, 4) # -1 means not changing the size of that dimension + tensor([[ 1, 1, 1, 1], + [ 2, 2, 2, 2], + [ 3, 3, 3, 3]]) +""", +) + +add_docstr_all( + "expand_as", + r""" +expand_as(other) -> Tensor + +Expand this tensor to the same size as :attr:`other`. +``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``. + +Please see :meth:`~Tensor.expand` for more information about ``expand``. + +Args: + other (:class:`torch.Tensor`): The result tensor has the same size + as :attr:`other`. +""", +) + +add_docstr_all( + "sum_to_size", + r""" +sum_to_size(*size) -> Tensor + +Sum ``this`` tensor to :attr:`size`. +:attr:`size` must be broadcastable to ``this`` tensor size. + +Args: + size (int...): a sequence of integers defining the shape of the output tensor. +""", +) + + +add_docstr_all( + "zero_", + r""" +zero_() -> Tensor + +Fills :attr:`self` tensor with zeros. 
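+
+Example (a small illustrative sketch, using the default float dtype)::
+
+    >>> x = torch.ones(2, 3)
+    >>> x.zero_()
+    tensor([[0., 0., 0.],
+            [0., 0., 0.]])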
+""", +) + +add_docstr_all( + "matmul", + r""" +matmul(tensor2) -> Tensor + +See :func:`torch.matmul` +""", +) + +add_docstr_all( + "chunk", + r""" +chunk(chunks, dim=0) -> List of Tensors + +See :func:`torch.chunk` +""", +) + +add_docstr_all( + "unsafe_chunk", + r""" +unsafe_chunk(chunks, dim=0) -> List of Tensors + +See :func:`torch.unsafe_chunk` +""", +) + +add_docstr_all( + "unsafe_split", + r""" +unsafe_split(split_size, dim=0) -> List of Tensors + +See :func:`torch.unsafe_split` +""", +) + +add_docstr_all( + "tensor_split", + r""" +tensor_split(indices_or_sections, dim=0) -> List of Tensors + +See :func:`torch.tensor_split` +""", +) + +add_docstr_all( + "hsplit", + r""" +hsplit(split_size_or_sections) -> List of Tensors + +See :func:`torch.hsplit` +""", +) + +add_docstr_all( + "vsplit", + r""" +vsplit(split_size_or_sections) -> List of Tensors + +See :func:`torch.vsplit` +""", +) + +add_docstr_all( + "dsplit", + r""" +dsplit(split_size_or_sections) -> List of Tensors + +See :func:`torch.dsplit` +""", +) + +add_docstr_all( + "stft", + r""" +stft(frame_length, hop, fft_size=None, return_onesided=True, window=None, pad_end=0) -> Tensor + +See :func:`torch.stft` +""", +) + +add_docstr_all( + "istft", + r""" +istft(n_fft, hop_length=None, win_length=None, window=None, + center=True, normalized=False, onesided=True, length=None) -> Tensor + +See :func:`torch.istft` +""", +) + +add_docstr_all( + "det", + r""" +det() -> Tensor + +See :func:`torch.det` +""", +) + +add_docstr_all( + "where", + r""" +where(condition, y) -> Tensor + +``self.where(condition, y)`` is equivalent to ``torch.where(condition, self, y)``. +See :func:`torch.where` +""", +) + +add_docstr_all( + "logdet", + r""" +logdet() -> Tensor + +See :func:`torch.logdet` +""", +) + +add_docstr_all( + "slogdet", + r""" +slogdet() -> (Tensor, Tensor) + +See :func:`torch.slogdet` +""", +) + +add_docstr_all( + "unbind", + r""" +unbind(dim=0) -> seq + +See :func:`torch.unbind` +""", +) + +add_docstr_all( + "pin_memory", + r""" +pin_memory() -> Tensor + +Copies the tensor to pinned memory, if it's not already pinned. +""", +) + +add_docstr_all( + "pinverse", + r""" +pinverse() -> Tensor + +See :func:`torch.pinverse` +""", +) + +add_docstr_all( + "index_add", + r""" +index_add(dim, index, source, *, alpha=1) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.index_add_`. +""", +) + +add_docstr_all( + "index_copy", + r""" +index_copy(dim, index, tensor2) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.index_copy_`. +""", +) + +add_docstr_all( + "index_fill", + r""" +index_fill(dim, index, value) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.index_fill_`. +""", +) + +add_docstr_all( + "scatter", + r""" +scatter(dim, index, src) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.scatter_` +""", +) + +add_docstr_all( + "scatter_add", + r""" +scatter_add(dim, index, src) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.scatter_add_` +""", +) + +add_docstr_all( + "scatter_reduce", + r""" +scatter_reduce(dim, index, src, reduce, *, include_self=True) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.scatter_reduce_` +""", +) + +add_docstr_all( + "masked_scatter", + r""" +masked_scatter(mask, tensor) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.masked_scatter_` + +.. note:: + + The inputs :attr:`self` and :attr:`mask` + :ref:`broadcast `. 
+ +Example: + + >>> self = torch.tensor([0, 0, 0, 0, 0]) + >>> mask = torch.tensor([[0, 0, 0, 1, 1], [1, 1, 0, 1, 1]]) + >>> source = torch.tensor([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) + >>> self.masked_scatter(mask, source) + tensor([[0, 0, 0, 0, 1], + [2, 3, 0, 4, 5]]) + +""", +) + +add_docstr_all( + "xlogy", + r""" +xlogy(other) -> Tensor + +See :func:`torch.xlogy` +""", +) + +add_docstr_all( + "xlogy_", + r""" +xlogy_(other) -> Tensor + +In-place version of :meth:`~Tensor.xlogy` +""", +) + +add_docstr_all( + "masked_fill", + r""" +masked_fill(mask, value) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.masked_fill_` +""", +) + +add_docstr_all( + "grad", + r""" +This attribute is ``None`` by default and becomes a Tensor the first time a call to +:func:`backward` computes gradients for ``self``. +The attribute will then contain the gradients computed and future calls to +:func:`backward` will accumulate (add) gradients into it. +""", +) + +add_docstr_all( + "retain_grad", + r""" +retain_grad() -> None + +Enables this Tensor to have their :attr:`grad` populated during +:func:`backward`. This is a no-op for leaf tensors. +""", +) + +add_docstr_all( + "retains_grad", + r""" +Is ``True`` if this Tensor is non-leaf and its :attr:`grad` is enabled to be +populated during :func:`backward`, ``False`` otherwise. +""", +) + +add_docstr_all( + "requires_grad", + r""" +Is ``True`` if gradients need to be computed for this Tensor, ``False`` otherwise. + +.. note:: + + The fact that gradients need to be computed for a Tensor do not mean that the :attr:`grad` + attribute will be populated, see :attr:`is_leaf` for more details. + +""", +) + +add_docstr_all( + "is_leaf", + r""" +All Tensors that have :attr:`requires_grad` which is ``False`` will be leaf Tensors by convention. + +For Tensors that have :attr:`requires_grad` which is ``True``, they will be leaf Tensors if they were +created by the user. This means that they are not the result of an operation and so +:attr:`grad_fn` is None. + +Only leaf Tensors will have their :attr:`grad` populated during a call to :func:`backward`. +To get :attr:`grad` populated for non-leaf Tensors, you can use :func:`retain_grad`. + +Example:: + + >>> a = torch.rand(10, requires_grad=True) + >>> a.is_leaf + True + >>> b = torch.rand(10, requires_grad=True).cuda() + >>> b.is_leaf + False + # b was created by the operation that cast a cpu Tensor into a cuda Tensor + >>> c = torch.rand(10, requires_grad=True) + 2 + >>> c.is_leaf + False + # c was created by the addition operation + >>> d = torch.rand(10).cuda() + >>> d.is_leaf + True + # d does not require gradients and so has no operation creating it (that is tracked by the autograd engine) + >>> e = torch.rand(10).cuda().requires_grad_() + >>> e.is_leaf + True + # e requires gradients and has no operations creating it + >>> f = torch.rand(10, requires_grad=True, device="cuda") + >>> f.is_leaf + True + # f requires grad, has no operation creating it + + +""", +) + +add_docstr_all( + "names", + r""" +Stores names for each of this tensor's dimensions. + +``names[idx]`` corresponds to the name of tensor dimension ``idx``. +Names are either a string if the dimension is named or ``None`` if the +dimension is unnamed. + +Dimension names may contain characters or underscore. Furthermore, a dimension +name must be a valid Python variable name (i.e., does not start with underscore). + +Tensors may not have two named dimensions with the same name. + +.. warning:: + The named tensor API is experimental and subject to change. 
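+
+Example (an illustrative sketch of named and unnamed dimensions)::
+
+    >>> imgs = torch.randn(1, 2, 2, 3, names=('N', 'C', 'H', 'W'))
+    >>> imgs.names
+    ('N', 'C', 'H', 'W')
+    >>> torch.randn(2, 3).names
+    (None, None)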
+ +""", +) + +add_docstr_all( + "is_cuda", + r""" +Is ``True`` if the Tensor is stored on the GPU, ``False`` otherwise. +""", +) + +add_docstr_all( + "is_cpu", + r""" +Is ``True`` if the Tensor is stored on the CPU, ``False`` otherwise. +""", +) + +add_docstr_all( + "is_xla", + r""" +Is ``True`` if the Tensor is stored on an XLA device, ``False`` otherwise. +""", +) + +add_docstr_all( + "is_ipu", + r""" +Is ``True`` if the Tensor is stored on the IPU, ``False`` otherwise. +""", +) + +add_docstr_all( + "is_xpu", + r""" +Is ``True`` if the Tensor is stored on the XPU, ``False`` otherwise. +""", +) + +add_docstr_all( + "is_quantized", + r""" +Is ``True`` if the Tensor is quantized, ``False`` otherwise. +""", +) + +add_docstr_all( + "is_meta", + r""" +Is ``True`` if the Tensor is a meta tensor, ``False`` otherwise. Meta tensors +are like normal tensors, but they carry no data. +""", +) + +add_docstr_all( + "is_mps", + r""" +Is ``True`` if the Tensor is stored on the MPS device, ``False`` otherwise. +""", +) + +add_docstr_all( + "is_sparse", + r""" +Is ``True`` if the Tensor uses sparse COO storage layout, ``False`` otherwise. +""", +) + +add_docstr_all( + "is_sparse_csr", + r""" +Is ``True`` if the Tensor uses sparse CSR storage layout, ``False`` otherwise. +""", +) + +add_docstr_all( + "device", + r""" +Is the :class:`torch.device` where this Tensor is. +""", +) + +add_docstr_all( + "ndim", + r""" +Alias for :meth:`~Tensor.dim()` +""", +) + +add_docstr_all( + "itemsize", + r""" +Alias for :meth:`~Tensor.element_size()` +""", +) + +add_docstr_all( + "nbytes", + r""" +Returns the number of bytes consumed by the "view" of elements of the Tensor +if the Tensor does not use sparse storage layout. +Defined to be :meth:`~Tensor.numel()` * :meth:`~Tensor.element_size()` +""", +) + +add_docstr_all( + "T", + r""" +Returns a view of this tensor with its dimensions reversed. + +If ``n`` is the number of dimensions in ``x``, +``x.T`` is equivalent to ``x.permute(n-1, n-2, ..., 0)``. + +.. warning:: + The use of :func:`Tensor.T` on tensors of dimension other than 2 to reverse their shape + is deprecated and it will throw an error in a future release. Consider :attr:`~.Tensor.mT` + to transpose batches of matrices or `x.permute(*torch.arange(x.ndim - 1, -1, -1))` to reverse + the dimensions of a tensor. +""", +) + +add_docstr_all( + "H", + r""" +Returns a view of a matrix (2-D tensor) conjugated and transposed. + +``x.H`` is equivalent to ``x.transpose(0, 1).conj()`` for complex matrices and +``x.transpose(0, 1)`` for real matrices. + +.. seealso:: + + :attr:`~.Tensor.mH`: An attribute that also works on batches of matrices. +""", +) + +add_docstr_all( + "mT", + r""" +Returns a view of this tensor with the last two dimensions transposed. + +``x.mT`` is equivalent to ``x.transpose(-2, -1)``. +""", +) + +add_docstr_all( + "mH", + r""" +Accessing this property is equivalent to calling :func:`adjoint`. +""", +) + +add_docstr_all( + "adjoint", + r""" +adjoint() -> Tensor + +Alias for :func:`adjoint` +""", +) + +add_docstr_all( + "real", + r""" +Returns a new tensor containing real values of the :attr:`self` tensor for a complex-valued input tensor. +The returned tensor and :attr:`self` share the same underlying storage. + +Returns :attr:`self` if :attr:`self` is a real-valued tensor tensor. 
+ +Example:: + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) + >>> x.real + tensor([ 0.3100, -0.5445, -1.6492, -0.0638]) + +""", +) + +add_docstr_all( + "imag", + r""" +Returns a new tensor containing imaginary values of the :attr:`self` tensor. +The returned tensor and :attr:`self` share the same underlying storage. + +.. warning:: + :func:`imag` is only supported for tensors with complex dtypes. + +Example:: + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) + >>> x.imag + tensor([ 0.3553, -0.7896, -0.0633, -0.8119]) + +""", +) + +add_docstr_all( + "as_subclass", + r""" +as_subclass(cls) -> Tensor + +Makes a ``cls`` instance with the same data pointer as ``self``. Changes +in the output mirror changes in ``self``, and the output stays attached +to the autograd graph. ``cls`` must be a subclass of ``Tensor``. +""", +) + +add_docstr_all( + "crow_indices", + r""" +crow_indices() -> IntTensor + +Returns the tensor containing the compressed row indices of the :attr:`self` +tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``. +The ``crow_indices`` tensor is strictly of shape (:attr:`self`.size(0) + 1) +and of type ``int32`` or ``int64``. When using MKL routines such as sparse +matrix multiplication, it is necessary to use ``int32`` indexing in order +to avoid downcasting and potentially losing information. + +Example:: + >>> csr = torch.eye(5,5).to_sparse_csr() + >>> csr.crow_indices() + tensor([0, 1, 2, 3, 4, 5], dtype=torch.int32) + +""", +) + +add_docstr_all( + "col_indices", + r""" +col_indices() -> IntTensor + +Returns the tensor containing the column indices of the :attr:`self` +tensor when :attr:`self` is a sparse CSR tensor of layout ``sparse_csr``. +The ``col_indices`` tensor is strictly of shape (:attr:`self`.nnz()) +and of type ``int32`` or ``int64``. When using MKL routines such as sparse +matrix multiplication, it is necessary to use ``int32`` indexing in order +to avoid downcasting and potentially losing information. + +Example:: + >>> csr = torch.eye(5,5).to_sparse_csr() + >>> csr.col_indices() + tensor([0, 1, 2, 3, 4], dtype=torch.int32) + +""", +) + +add_docstr_all( + "to_padded_tensor", + r""" +to_padded_tensor(padding, output_size=None) -> Tensor +See :func:`to_padded_tensor` +""", +) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_tensor_str.py b/llmeval-env/lib/python3.10/site-packages/torch/_tensor_str.py new file mode 100644 index 0000000000000000000000000000000000000000..6903b49715ecda81bd55db9d8e85126f74c2eb46 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_tensor_str.py @@ -0,0 +1,697 @@ +import contextlib +import dataclasses +import math +import textwrap +from typing import Any, Dict, Optional + +import torch +from torch import inf + + +@dataclasses.dataclass +class __PrinterOptions: + precision: int = 4 + threshold: float = 1000 + edgeitems: int = 3 + linewidth: int = 80 + sci_mode: Optional[bool] = None + + +PRINT_OPTS = __PrinterOptions() + + +# We could use **kwargs, but this will give better docs +def set_printoptions( + precision=None, + threshold=None, + edgeitems=None, + linewidth=None, + profile=None, + sci_mode=None, +): + r"""Set options for printing. Items shamelessly taken from NumPy + + Args: + precision: Number of digits of precision for floating point output + (default = 4). 
+ threshold: Total number of array elements which trigger summarization + rather than full `repr` (default = 1000). + edgeitems: Number of array items in summary at beginning and end of + each dimension (default = 3). + linewidth: The number of characters per line for the purpose of + inserting line breaks (default = 80). Thresholded matrices will + ignore this parameter. + profile: Sane defaults for pretty printing. Can override with any of + the above options. (any one of `default`, `short`, `full`) + sci_mode: Enable (True) or disable (False) scientific notation. If + None (default) is specified, the value is defined by + `torch._tensor_str._Formatter`. This value is automatically chosen + by the framework. + + Example:: + + >>> # Limit the precision of elements + >>> torch.set_printoptions(precision=2) + >>> torch.tensor([1.12345]) + tensor([1.12]) + >>> # Limit the number of elements shown + >>> torch.set_printoptions(threshold=5) + >>> torch.arange(10) + tensor([0, 1, 2, ..., 7, 8, 9]) + >>> # Restore defaults + >>> torch.set_printoptions(profile='default') + >>> torch.tensor([1.12345]) + tensor([1.1235]) + >>> torch.arange(10) + tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + + """ + if profile is not None: + if profile == "default": + PRINT_OPTS.precision = 4 + PRINT_OPTS.threshold = 1000 + PRINT_OPTS.edgeitems = 3 + PRINT_OPTS.linewidth = 80 + elif profile == "short": + PRINT_OPTS.precision = 2 + PRINT_OPTS.threshold = 1000 + PRINT_OPTS.edgeitems = 2 + PRINT_OPTS.linewidth = 80 + elif profile == "full": + PRINT_OPTS.precision = 4 + PRINT_OPTS.threshold = inf + PRINT_OPTS.edgeitems = 3 + PRINT_OPTS.linewidth = 80 + + if precision is not None: + PRINT_OPTS.precision = precision + if threshold is not None: + PRINT_OPTS.threshold = threshold + if edgeitems is not None: + PRINT_OPTS.edgeitems = edgeitems + if linewidth is not None: + PRINT_OPTS.linewidth = linewidth + PRINT_OPTS.sci_mode = sci_mode + + +def get_printoptions() -> Dict[str, Any]: + r"""Gets the current options for printing, as a dictionary that + can be passed as ``**kwargs`` to set_printoptions(). + """ + return dataclasses.asdict(PRINT_OPTS) + + +@contextlib.contextmanager +def printoptions(**kwargs): + r"""Context manager that temporarily changes the print options. Accepted + arguments are same as :func:`set_printoptions`.""" + old_kwargs = get_printoptions() + set_printoptions(**kwargs) + try: + yield + finally: + set_printoptions(**old_kwargs) + + +def tensor_totype(t): + dtype = torch.float if t.is_mps else torch.double + return t.to(dtype=dtype) + + +class _Formatter: + def __init__(self, tensor): + self.floating_dtype = tensor.dtype.is_floating_point + self.int_mode = True + self.sci_mode = False + self.max_width = 1 + + with torch.no_grad(): + tensor_view = tensor.reshape(-1) + + if not self.floating_dtype: + for value in tensor_view: + value_str = f"{value}" + self.max_width = max(self.max_width, len(value_str)) + + else: + nonzero_finite_vals = torch.masked_select( + tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0) + ) + + if nonzero_finite_vals.numel() == 0: + # no valid number, do nothing + return + + # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU. 
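+                # Summary of the steps below: take abs() of the nonzero finite
+                # values and record their min/max. If every value is integral,
+                # stay in "int mode" (no fractional digits are printed);
+                # otherwise use fixed-point notation with PRINT_OPTS.precision
+                # digits. A large dynamic range (max/min > 1000), very large
+                # magnitudes (> 1e8) or, for non-integral values, very small
+                # magnitudes (< 1e-4) switch the formatter to scientific
+                # notation. In every branch the widest rendered string is
+                # tracked so that format() can right-align all elements to a
+                # common column width.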
+ nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs()) + nonzero_finite_min = tensor_totype(nonzero_finite_abs.min()) + nonzero_finite_max = tensor_totype(nonzero_finite_abs.max()) + + for value in nonzero_finite_vals: + if value != torch.ceil(value): + self.int_mode = False + break + + if self.int_mode: + # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites + # to indicate that the tensor is of floating type. add 1 to the len to account for this. + if ( + nonzero_finite_max / nonzero_finite_min > 1000.0 + or nonzero_finite_max > 1.0e8 + ): + self.sci_mode = True + for value in nonzero_finite_vals: + value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value) + self.max_width = max(self.max_width, len(value_str)) + else: + for value in nonzero_finite_vals: + value_str = f"{value:.0f}" + self.max_width = max(self.max_width, len(value_str) + 1) + else: + # Check if scientific representation should be used. + if ( + nonzero_finite_max / nonzero_finite_min > 1000.0 + or nonzero_finite_max > 1.0e8 + or nonzero_finite_min < 1.0e-4 + ): + self.sci_mode = True + for value in nonzero_finite_vals: + value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value) + self.max_width = max(self.max_width, len(value_str)) + else: + for value in nonzero_finite_vals: + value_str = f"{{:.{PRINT_OPTS.precision}f}}".format(value) + self.max_width = max(self.max_width, len(value_str)) + + if PRINT_OPTS.sci_mode is not None: + self.sci_mode = PRINT_OPTS.sci_mode + + def width(self): + return self.max_width + + def format(self, value): + if self.floating_dtype: + if self.sci_mode: + ret = f"{{:{self.max_width}.{PRINT_OPTS.precision}e}}".format(value) + elif self.int_mode: + ret = f"{value:.0f}" + if not (math.isinf(value) or math.isnan(value)): + ret += "." 
+ else: + ret = f"{{:.{PRINT_OPTS.precision}f}}".format(value) + else: + ret = f"{value}" + return (self.max_width - len(ret)) * " " + ret + + +def _scalar_str(self, formatter1, formatter2=None): + if formatter2 is not None: + real_str = _scalar_str(self.real, formatter1) + imag_str = (_scalar_str(self.imag, formatter2) + "j").lstrip() + # handles negative numbers, +0.0, -0.0 + if imag_str[0] == "+" or imag_str[0] == "-": + return real_str + imag_str + else: + return real_str + "+" + imag_str + else: + return formatter1.format(self.item()) + + +def _vector_str(self, indent, summarize, formatter1, formatter2=None): + # length includes spaces and comma between elements + element_length = formatter1.width() + 2 + if formatter2 is not None: + # width for imag_formatter + an extra j for complex + element_length += formatter2.width() + 1 + + elements_per_line = max( + 1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length))) + ) + + def _val_formatter(val, formatter1=formatter1, formatter2=formatter2): + if formatter2 is not None: + real_str = formatter1.format(val.real) + imag_str = (formatter2.format(val.imag) + "j").lstrip() + # handles negative numbers, +0.0, -0.0 + if imag_str[0] == "+" or imag_str[0] == "-": + return real_str + imag_str + else: + return real_str + "+" + imag_str + else: + return formatter1.format(val) + + if summarize and not PRINT_OPTS.edgeitems: + # Deal with edge case that negative zero is zero + data = ["..."] + elif summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems: + data = ( + [_val_formatter(val) for val in self[: PRINT_OPTS.edgeitems].tolist()] + + [" ..."] + + [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems :].tolist()] + ) + else: + data = [_val_formatter(val) for val in self.tolist()] + + data_lines = [ + data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line) + ] + lines = [", ".join(line) for line in data_lines] + return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]" + + +# formatter2 is only used for printing complex tensors. +# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real +# and tensor.imag respesectively +def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None): + dim = self.dim() + + if dim == 0: + return _scalar_str(self, formatter1, formatter2) + + if dim == 1: + return _vector_str(self, indent, summarize, formatter1, formatter2) + + if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems: + slices = ( + [ + _tensor_str_with_formatter( + self[i], indent + 1, summarize, formatter1, formatter2 + ) + for i in range(0, PRINT_OPTS.edgeitems) + ] + + ["..."] + + [ + _tensor_str_with_formatter( + self[i], indent + 1, summarize, formatter1, formatter2 + ) + for i in range(len(self) - PRINT_OPTS.edgeitems, len(self)) + ] + ) + else: + slices = [ + _tensor_str_with_formatter( + self[i], indent + 1, summarize, formatter1, formatter2 + ) + for i in range(0, self.size(0)) + ] + + tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices) + return "[" + tensor_str + "]" + + +def _tensor_str(self, indent): + if self.numel() == 0: + return "[]" + + if self.has_names(): + # There are two main codepaths (possibly more) that tensor printing goes through: + # - tensor data can fit comfortably on screen + # - tensor data needs to be summarized + # Some of the codepaths don't fully support named tensors, so we send in + # an unnamed tensor to the formatting code as a workaround. 
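+        # The dimension names themselves are not lost: _str_intern appends a
+        # "names=..." suffix taken from the original, still-named tensor.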
+ self = self.rename(None) + + summarize = self.numel() > PRINT_OPTS.threshold + + if self._is_zerotensor(): + self = self.clone() + + # handle the negative bit + if self.is_neg(): + self = self.resolve_neg() + + if self.dtype in [ + torch.float16, + torch.bfloat16, + torch.float8_e5m2, + torch.float8_e5m2fnuz, + torch.float8_e4m3fn, + torch.float8_e4m3fnuz, + ]: + self = self.float() + + if self.dtype is torch.complex32: + self = self.cfloat() + + if self.dtype.is_complex: + # handle the conjugate bit + self = self.resolve_conj() + real_formatter = _Formatter( + get_summarized_data(self.real) if summarize else self.real + ) + imag_formatter = _Formatter( + get_summarized_data(self.imag) if summarize else self.imag + ) + return _tensor_str_with_formatter( + self, indent, summarize, real_formatter, imag_formatter + ) + else: + formatter = _Formatter(get_summarized_data(self) if summarize else self) + return _tensor_str_with_formatter(self, indent, summarize, formatter) + + +def _add_suffixes(tensor_str, suffixes, indent, force_newline): + tensor_strs = [tensor_str] + last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1 + for suffix in suffixes: + suffix_len = len(suffix) + if force_newline or last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth: + tensor_strs.append(",\n" + " " * indent + suffix) + last_line_len = indent + suffix_len + force_newline = False + else: + tensor_strs.append(", " + suffix) + last_line_len += suffix_len + 2 + tensor_strs.append(")") + return "".join(tensor_strs) + + +def get_summarized_data(self): + dim = self.dim() + if dim == 0: + return self + if dim == 1: + if self.size(0) > 2 * PRINT_OPTS.edgeitems: + return torch.cat( + (self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :]) + ) + else: + return self + if not PRINT_OPTS.edgeitems: + return self.new_empty([0] * self.dim()) + elif self.size(0) > 2 * PRINT_OPTS.edgeitems: + start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)] + end = [self[i] for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))] + return torch.stack([get_summarized_data(x) for x in (start + end)]) + else: + return torch.stack([get_summarized_data(x) for x in self]) + + +def _str_intern(inp, *, tensor_contents=None): + if torch._C._functorch.is_functorch_wrapped_tensor(inp): + return _functorch_wrapper_str_intern(inp, tensor_contents=tensor_contents) + is_plain_tensor = type(inp) is torch.Tensor or type(inp) is torch.nn.Parameter + if inp.is_nested: + prefix = "nested_tensor(" + elif is_plain_tensor: + prefix = "tensor(" + else: + prefix = f"{type(inp).__name__}(" + indent = len(prefix) + suffixes = [] + custom_contents_provided = tensor_contents is not None + if custom_contents_provided: + tensor_str = tensor_contents + + # This is used to extract the primal value and thus disable the forward AD + # within this function. + # TODO(albanD) This needs to be updated when more than one level is supported + self, tangent = torch.autograd.forward_ad.unpack_dual(inp) + + # Note [Print tensor device]: + # A general logic here is we only print device when it doesn't match + # the device specified in default tensor type. + # Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus + # torch._C._get_default_device() only returns either cpu or cuda. + # In other cases, we don't have a way to set them as default yet, + # and we should always print out device for them. 
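+    # Concretely: append a device suffix when the device type differs from the
+    # default, when a CUDA tensor lives on a non-current CUDA device, or
+    # unconditionally for MPS tensors.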
+ if ( + self.device.type != torch._C._get_default_device() + or ( + self.device.type == "cuda" + and torch.cuda.current_device() != self.device.index + ) + or (self.device.type == "mps") + ): + suffixes.append("device='" + str(self.device) + "'") + + # Tensor printing performs tensor operations like slice, indexing, etc to make it in a + # representable format. These operations on ipu/xla/lazy/mtia tensor results in compilations. Hence, + # to avoid compilations, copying the tensor to cpu before printing. + if self.device.type in ["xla", "lazy", "ipu", "mtia"]: + self = self.to("cpu") + + # TODO: add an API to map real -> complex dtypes + _default_complex_dtype = ( + torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat + ) + has_default_dtype = self.dtype in ( + torch.get_default_dtype(), + _default_complex_dtype, + torch.int64, + torch.bool, + ) + if self.is_sparse: + suffixes.append("size=" + str(tuple(self.shape))) + from torch._subclasses.fake_tensor import FakeTensor + + is_meta = self.is_meta or isinstance(self, FakeTensor) + if not is_meta: + suffixes.append("nnz=" + str(self._nnz())) + if not has_default_dtype: + suffixes.append("dtype=" + str(self.dtype)) + if not custom_contents_provided: + indices_prefix = "indices=tensor(" + indices = self._indices().detach() + if is_meta: + indices_str = "..." + else: + indices_str = _tensor_str(indices, indent + len(indices_prefix)) + if indices.numel() == 0 or is_meta: + indices_str += ", size=" + str(tuple(indices.shape)) + values_prefix = "values=tensor(" + values = self._values().detach() + if is_meta: + values_str = "..." + else: + values_str = _tensor_str(values, indent + len(values_prefix)) + if values.numel() == 0 or is_meta: + values_str += ", size=" + str(tuple(values.shape)) + tensor_str = ( + indices_prefix + + indices_str + + "),\n" + + " " * indent + + values_prefix + + values_str + + ")" + ) + elif self.layout in { + torch.sparse_csr, + torch.sparse_csc, + torch.sparse_bsr, + torch.sparse_bsc, + }: + from torch._subclasses.fake_tensor import FakeTensor + + suffixes.append("size=" + str(tuple(self.shape))) + is_meta = self.is_meta or isinstance(self, FakeTensor) + if not is_meta: + suffixes.append("nnz=" + str(self._nnz())) + if not has_default_dtype: + suffixes.append("dtype=" + str(self.dtype)) + if not custom_contents_provided: + compressed_indices_method, plain_indices_method = { + torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices), + torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices), + torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices), + torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices), + }[self.layout] + if self.layout in {torch.sparse_csr, torch.sparse_bsr}: + cdimname, pdimname = "row", "column" + else: + cdimname, pdimname = "column", "row" + compressed_indices_prefix = f"c{cdimname[:3]}_indices=tensor(" + compressed_indices = compressed_indices_method(self).detach() + if is_meta: + compressed_indices_str = "..." + else: + compressed_indices_str = _tensor_str( + compressed_indices, indent + len(compressed_indices_prefix) + ) + if compressed_indices.numel() == 0 or is_meta: + compressed_indices_str += ", size=" + str( + tuple(compressed_indices.shape) + ) + plain_indices_prefix = f"{pdimname[:3]}_indices=tensor(" + plain_indices = plain_indices_method(self).detach() + if is_meta: + plain_indices_str = "..." 
+ else: + plain_indices_str = _tensor_str( + plain_indices, indent + len(plain_indices_prefix) + ) + if plain_indices.numel() == 0 or is_meta: + plain_indices_str += ", size=" + str(tuple(plain_indices.shape)) + values_prefix = "values=tensor(" + values = self.values().detach() + if is_meta: + values_str = "..." + else: + values_str = _tensor_str(values, indent + len(values_prefix)) + if values.numel() == 0 or is_meta: + values_str += ", size=" + str(tuple(values.shape)) + tensor_str = ( + compressed_indices_prefix + + compressed_indices_str + + "),\n" + + " " * indent + + plain_indices_prefix + + plain_indices_str + + "),\n" + + " " * indent + + values_prefix + + values_str + + ")" + ) + elif self.is_quantized: + suffixes.append("size=" + str(tuple(self.shape))) + if not has_default_dtype: + suffixes.append("dtype=" + str(self.dtype)) + suffixes.append("quantization_scheme=" + str(self.qscheme())) + if ( + self.qscheme() == torch.per_tensor_affine + or self.qscheme() == torch.per_tensor_symmetric + ): + suffixes.append("scale=" + str(self.q_scale())) + suffixes.append("zero_point=" + str(self.q_zero_point())) + elif ( + self.qscheme() == torch.per_channel_affine + or self.qscheme() == torch.per_channel_symmetric + or self.qscheme() == torch.per_channel_affine_float_qparams + ): + suffixes.append("scale=" + str(self.q_per_channel_scales())) + suffixes.append("zero_point=" + str(self.q_per_channel_zero_points())) + suffixes.append("axis=" + str(self.q_per_channel_axis())) + if not custom_contents_provided: + tensor_str = _tensor_str(self.dequantize(), indent) + elif self.is_nested: + if not custom_contents_provided: + + def indented_str(s, indent): + return "\n".join(f" {line}" for line in s.split("\n")) + + strs = ",\n".join( + indented_str(str(t), indent + 1) + for t in torch.ops.aten.unbind.int(self, 0) + ) + tensor_str = f"[\n{strs}\n]" + elif torch._is_functional_tensor(self): + prefix = "_to_functional_tensor(" + tensor_str = repr(torch._from_functional_tensor(self)) + else: + # Circular import problem, so we import it here + from torch._subclasses.fake_tensor import FakeTensor + + if self.is_meta or isinstance(self, FakeTensor): + suffixes.append("size=" + str(tuple(self.shape))) + if self.dtype != torch.get_default_dtype(): + suffixes.append("dtype=" + str(self.dtype)) + # TODO: This implies that ellipses is valid syntax for allocating + # a meta tensor or FakeTensor, which it could be, but it isn't right now + if not custom_contents_provided: + tensor_str = "..." + else: + if self.numel() == 0 and not self.is_sparse: + # Explicitly print the shape if it is not (0,), to match NumPy behavior + if self.dim() != 1: + suffixes.append("size=" + str(tuple(self.shape))) + + # In an empty tensor, there are no elements to infer if the dtype + # should be int64, so it must be shown explicitly. + if self.dtype != torch.get_default_dtype(): + suffixes.append("dtype=" + str(self.dtype)) + if not custom_contents_provided: + tensor_str = "[]" + else: + if not PRINT_OPTS.edgeitems: + suffixes.append("size=" + str(tuple(self.shape))) + + if not has_default_dtype: + suffixes.append("dtype=" + str(self.dtype)) + + if not custom_contents_provided: + if self.layout != torch.strided: + tensor_str = _tensor_str(self.to_dense(), indent) + else: + tensor_str = _tensor_str(self, indent) + + if self.layout != torch.strided: + suffixes.append("layout=" + str(self.layout)) + + # Use inp here to get the original grad_fn and not the one generated by the forward grad + # unpacking. 
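+    # Suffix logic below: show grad_fn=<...> when a grad_fn exists (falling
+    # back to the underlying C++ node name for CppFunction nodes), show
+    # grad_fn=<Invalid> when accessing grad_fn raises (see the issue linked
+    # below), and otherwise show requires_grad=True for leaf tensors that
+    # require grad.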
+ grad_fn_name = None + try: + grad_fn = inp.grad_fn + except RuntimeError: + # Accessing the grad_fn calls rebasing logic which would cause an error + # if that tensor is a view created in no-grad mode modified in-place in + # no-grad mode. See: https://github.com/pytorch/pytorch/issues/99968 + grad_fn_name = "Invalid" + + if grad_fn_name is None and grad_fn is not None: # type: ignore[possibly-undefined] + grad_fn_name = type(grad_fn).__name__ + if grad_fn_name == "CppFunction": + grad_fn_name = grad_fn.name().rsplit("::", 1)[-1] + + if grad_fn_name is not None: + suffixes.append(f"grad_fn=<{grad_fn_name}>") + elif inp.requires_grad: + suffixes.append("requires_grad=True") + + if self.has_names(): + suffixes.append(f"names={self.names}") + + if tangent is not None: + suffixes.append(f"tangent={tangent}") + + string_repr = _add_suffixes( + prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse # type: ignore[possibly-undefined] + ) + + # Check if this instance is flagged as a parameter and change the repr accordingly. + # Unfortunately, this function has to be aware of this detail. + # NB: This is currently skipped for plain tensor parameters to maintain BC. In the future, + # this should be done for those as well to produce a valid repr. + if isinstance(self, torch.nn.Parameter) and not is_plain_tensor: + string_repr = f"Parameter({string_repr})" + + return string_repr + + +def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None): + level = torch._C._functorch.maybe_get_level(tensor) + assert level != -1 + + if torch._C._functorch.is_functionaltensor(tensor): + # Since we're unwrapping the FunctionalTensorWrapper, we need to make sure + # that it's up to date first + torch._sync(tensor) + + value = torch._C._functorch.get_unwrapped(tensor) + value_repr = repr(value) + + indented_value_repr = textwrap.indent(value_repr, " " * 4) + if torch._C._functorch.is_batchedtensor(tensor): + bdim = torch._C._functorch.maybe_get_bdim(tensor) + assert bdim != -1 + return ( + f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n" + f"{indented_value_repr}\n" + f")" + ) + if torch._C._functorch.is_gradtrackingtensor(tensor): + return ( + f"GradTrackingTensor(lvl={level}, value=\n" f"{indented_value_repr}\n" f")" + ) + if torch._C._functorch.is_functionaltensor(tensor): + return f"FunctionalTensor(lvl={level}, value=\\\n{value_repr})" + + raise ValueError("We don't know how to print this, please file us an issue") + + +def _str(self, *, tensor_contents=None): + with torch.no_grad(), torch.utils._python_dispatch._disable_current_modes(): + guard = torch._C._DisableFuncTorch() + return _str_intern(self, tensor_contents=tensor_contents) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_torch_docs.py b/llmeval-env/lib/python3.10/site-packages/torch/_torch_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..dee69d9c408fe7c1ae2ac16fe5cd0788aaf1e108 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_torch_docs.py @@ -0,0 +1,14192 @@ +"""Adds docstrings to functions defined in the torch._C module.""" + +import re + +import torch._C +from torch._C import _add_docstr as add_docstr + + +def parse_kwargs(desc): + r"""Map a description of args to a dictionary of {argname: description}. 
+ + Input: + (' weight (Tensor): a weight tensor\n' + + ' Some optional description') + Output: { + 'weight': \ + 'weight (Tensor): a weight tensor\n Some optional description' + } + """ + # Split on exactly 4 spaces after a newline + regx = re.compile(r"\n\s{4}(?!\s)") + kwargs = [section.strip() for section in regx.split(desc)] + kwargs = [section for section in kwargs if len(section) > 0] + return {desc.split(" ")[0]: desc for desc in kwargs} + + +def merge_dicts(*dicts): + """Merge dictionaries into a single dictionary.""" + return {x: d[x] for d in dicts for x in d} + + +common_args = parse_kwargs( + """ + input (Tensor): the input tensor. + generator (:class:`torch.Generator`, optional): a pseudorandom number generator for sampling + out (Tensor, optional): the output tensor. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned tensor. Default: ``torch.preserve_format``. +""" +) + +reduceops_common_args = merge_dicts( + common_args, + parse_kwargs( + """ + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + If specified, the input tensor is casted to :attr:`dtype` before the operation + is performed. This is useful for preventing data type overflows. Default: None. + keepdim (bool): whether the output tensor has :attr:`dim` retained or not. +""" + ), +) + +multi_dim_common = merge_dicts( + reduceops_common_args, + parse_kwargs( + """ + dim (int or tuple of ints): the dimension or dimensions to reduce. +""" + ), + { + "keepdim_details": """ +If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension(s) :attr:`dim` where it is of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in the +output tensor having 1 (or ``len(dim)``) fewer dimension(s). +""" + }, + { + "opt_dim": """ + dim (int or tuple of ints, optional): the dimension or dimensions to reduce. + If ``None``, all dimensions are reduced. +""" + }, +) + +single_dim_common = merge_dicts( + reduceops_common_args, + parse_kwargs( + """ + dim (int): the dimension to reduce. +""" + ), + { + "keepdim_details": """If :attr:`keepdim` is ``True``, the output tensor is of the same size +as :attr:`input` except in the dimension :attr:`dim` where it is of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in +the output tensor having 1 fewer dimension than :attr:`input`.""" + }, +) + +factory_common_args = merge_dicts( + common_args, + parse_kwargs( + """ + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, uses a global default (see :func:`torch.set_default_dtype`). + layout (:class:`torch.layout`, optional): the desired layout of returned Tensor. + Default: ``torch.strided``. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.contiguous_format``. 
+ check_invariants (bool, optional): If sparse tensor invariants are checked. + Default: as returned by :func:`torch.sparse.check_sparse_tensor_invariants.is_enabled`, + initially False. +""" + ), + { + "sparse_factory_device_note": """\ +.. note:: + + If the ``device`` argument is not specified the device of the given + :attr:`values` and indices tensor(s) must match. If, however, the + argument is specified the input Tensors will be converted to the + given device and in turn determine the device of the constructed + sparse tensor.""" + }, +) + +factory_like_common_args = parse_kwargs( + """ + input (Tensor): the size of :attr:`input` will determine size of the output tensor. + layout (:class:`torch.layout`, optional): the desired layout of returned tensor. + Default: if ``None``, defaults to the layout of :attr:`input`. + dtype (:class:`torch.dtype`, optional): the desired data type of returned Tensor. + Default: if ``None``, defaults to the dtype of :attr:`input`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, defaults to the device of :attr:`input`. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. + memory_format (:class:`torch.memory_format`, optional): the desired memory format of + returned Tensor. Default: ``torch.preserve_format``. +""" +) + +factory_data_common_args = parse_kwargs( + """ + data (array_like): Initial data for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, infers data type from :attr:`data`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if ``None``, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + requires_grad (bool, optional): If autograd should record operations on the + returned tensor. Default: ``False``. + pin_memory (bool, optional): If set, returned tensor would be allocated in + the pinned memory. Works only for CPU tensors. Default: ``False``. +""" +) + +tf32_notes = { + "tf32_note": """This operator supports :ref:`TensorFloat32`.""" +} + +rocm_fp16_notes = { + "rocm_fp16_note": """On certain ROCm devices, when using float16 inputs this module will use \ +:ref:`different precision` for backward.""" +} + +reproducibility_notes = { + "forward_reproducibility_note": """This operation may behave nondeterministically when given tensors on \ +a CUDA device. See :doc:`/notes/randomness` for more information.""", + "backward_reproducibility_note": """This operation may produce nondeterministic gradients when given tensors on \ +a CUDA device. See :doc:`/notes/randomness` for more information.""", + "cudnn_reproducibility_note": """In some circumstances when given tensors on a CUDA device \ +and using CuDNN, this operator may select a nondeterministic algorithm to increase performance. If this is \ +undesirable, you can try to make the operation deterministic (potentially at \ +a performance cost) by setting ``torch.backends.cudnn.deterministic = True``. 
\ +See :doc:`/notes/randomness` for more information.""", +} + +sparse_support_notes = { + "sparse_beta_warning": """ +.. warning:: + Sparse support is a beta feature and some layout(s)/dtype/device combinations may not be supported, + or may not have autograd support. If you notice missing functionality please + open a feature request.""", +} + +add_docstr( + torch.abs, + r""" +abs(input, *, out=None) -> Tensor + +Computes the absolute value of each element in :attr:`input`. + +.. math:: + \text{out}_{i} = |\text{input}_{i}| +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.abs(torch.tensor([-1, -2, 3])) + tensor([ 1, 2, 3]) +""".format( + **common_args + ), +) + +add_docstr( + torch.absolute, + r""" +absolute(input, *, out=None) -> Tensor + +Alias for :func:`torch.abs` +""", +) + +add_docstr( + torch.acos, + r""" +acos(input, *, out=None) -> Tensor + +Computes the inverse cosine of each element in :attr:`input`. + +.. math:: + \text{out}_{i} = \cos^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.3348, -0.5889, 0.2005, -0.1584]) + >>> torch.acos(a) + tensor([ 1.2294, 2.2004, 1.3690, 1.7298]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arccos, + r""" +arccos(input, *, out=None) -> Tensor + +Alias for :func:`torch.acos`. +""", +) + +add_docstr( + torch.acosh, + r""" +acosh(input, *, out=None) -> Tensor + +Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \cosh^{-1}(\text{input}_{i}) + +Note: + The domain of the inverse hyperbolic cosine is `[1, inf)` and values outside this range + will be mapped to ``NaN``, except for `+ INF` for which the output is mapped to `+ INF`. +""" + + r""" +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.randn(4).uniform_(1, 2) + >>> a + tensor([ 1.3192, 1.9915, 1.9674, 1.7151 ]) + >>> torch.acosh(a) + tensor([ 0.7791, 1.3120, 1.2979, 1.1341 ]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arccosh, + r""" +arccosh(input, *, out=None) -> Tensor + +Alias for :func:`torch.acosh`. +""", +) + +add_docstr( + torch.index_add, + r""" +index_add(input, dim, index, source, *, alpha=1, out=None) -> Tensor + +See :meth:`~Tensor.index_add_` for function description. +""", +) + +add_docstr( + torch.index_copy, + r""" +index_copy(input, dim, index, source, *, out=None) -> Tensor + +See :meth:`~Tensor.index_add_` for function description. +""", +) + +add_docstr( + torch.index_reduce, + r""" +index_reduce(input, dim, index, source, reduce, *, include_self=True, out=None) -> Tensor + +See :meth:`~Tensor.index_reduce_` for function description. +""", +) + +add_docstr( + torch.add, + r""" +add(input, other, *, alpha=1, out=None) -> Tensor + +Adds :attr:`other`, scaled by :attr:`alpha`, to :attr:`input`. + +.. math:: + \text{{out}}_i = \text{{input}}_i + \text{{alpha}} \times \text{{other}}_i +""" + + r""" + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer, float, and complex inputs. + +Args: + {input} + other (Tensor or Number): the tensor or number to add to :attr:`input`. + +Keyword arguments: + alpha (Number): the multiplier for :attr:`other`. 
+ {out} + +Examples:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.0202, 1.0985, 1.3506, -0.6056]) + >>> torch.add(a, 20) + tensor([ 20.0202, 21.0985, 21.3506, 19.3944]) + + >>> b = torch.randn(4) + >>> b + tensor([-0.9732, -0.3497, 0.6245, 0.4022]) + >>> c = torch.randn(4, 1) + >>> c + tensor([[ 0.3743], + [-1.7724], + [-0.5811], + [-0.8017]]) + >>> torch.add(b, c, alpha=10) + tensor([[ 2.7695, 3.3930, 4.3672, 4.1450], + [-18.6971, -18.0736, -17.0994, -17.3216], + [ -6.7845, -6.1610, -5.1868, -5.4090], + [ -8.9902, -8.3667, -7.3925, -7.6147]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.addbmm, + r""" +addbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + +Performs a batch matrix-matrix product of matrices stored +in :attr:`batch1` and :attr:`batch2`, +with a reduced add step (all matrix multiplications get accumulated +along the first dimension). +:attr:`input` is added to the final result. + +:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the +same number of matrices. + +If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a +:math:`(b \times m \times p)` tensor, :attr:`input` must be +:ref:`broadcastable ` with a :math:`(n \times p)` tensor +and :attr:`out` will be a :math:`(n \times p)` tensor. + +.. math:: + out = \beta\ \text{input} + \alpha\ (\sum_{i=0}^{b-1} \text{batch1}_i \mathbin{@} \text{batch2}_i) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and :attr:`alpha` +must be real numbers, otherwise they should be integers. + +{tf32_note} + +{rocm_fp16_note} + +Args: + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + input (Tensor): matrix to be added + alpha (Number, optional): multiplier for `batch1 @ batch2` (:math:`\alpha`) + {out} + +Example:: + + >>> M = torch.randn(3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.addbmm(M, batch1, batch2) + tensor([[ 6.6311, 0.0503, 6.9768, -12.0362, -2.1653], + [ -4.8185, -1.4255, -6.6760, 8.9453, 2.5743], + [ -3.8202, 4.3691, 1.0943, -1.1109, 5.4730]]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes + ), +) + +add_docstr( + torch.addcdiv, + r""" +addcdiv(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + +Performs the element-wise division of :attr:`tensor1` by :attr:`tensor2`, +multiplies the result by the scalar :attr:`value` and adds it to :attr:`input`. + +.. warning:: + Integer division with addcdiv is no longer supported, and in a future + release addcdiv will perform a true division of tensor1 and tensor2. + The historic addcdiv behavior can be implemented as + (input + value * torch.trunc(tensor1 / tensor2)).to(input.dtype) + for integer inputs and as (input + value * tensor1 / tensor2) for float inputs. + The future addcdiv behavior is just the latter implementation: + (input + value * tensor1 / tensor2), for all dtypes. + +.. math:: + \text{out}_i = \text{input}_i + \text{value} \times \frac{\text{tensor1}_i}{\text{tensor2}_i} +""" + + r""" + +The shapes of :attr:`input`, :attr:`tensor1`, and :attr:`tensor2` must be +:ref:`broadcastable `. + +For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be +a real number, otherwise an integer. 
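+
+A minimal deterministic sketch of the formula above (the values here are
+chosen purely for clarity)::
+
+    >>> torch.addcdiv(torch.zeros(3), torch.ones(3), torch.full((3,), 2.), value=0.5)  # illustrative values
+    tensor([0.2500, 0.2500, 0.2500])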
+ +Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the numerator tensor + tensor2 (Tensor): the denominator tensor + +Keyword args: + value (Number, optional): multiplier for :math:`\text{{tensor1}} / \text{{tensor2}}` + {out} + +Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcdiv(t, t1, t2, value=0.1) + tensor([[-0.2312, -3.6496, 0.1312], + [-1.0428, 3.4292, -0.1030], + [-0.5369, -0.9829, 0.0430]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.addcmul, + r""" +addcmul(input, tensor1, tensor2, *, value=1, out=None) -> Tensor + +Performs the element-wise multiplication of :attr:`tensor1` +by :attr:`tensor2`, multiplies the result by the scalar :attr:`value` +and adds it to :attr:`input`. + +.. math:: + \text{out}_i = \text{input}_i + \text{value} \times \text{tensor1}_i \times \text{tensor2}_i +""" + + r""" +The shapes of :attr:`tensor`, :attr:`tensor1`, and :attr:`tensor2` must be +:ref:`broadcastable `. + +For inputs of type `FloatTensor` or `DoubleTensor`, :attr:`value` must be +a real number, otherwise an integer. + +Args: + input (Tensor): the tensor to be added + tensor1 (Tensor): the tensor to be multiplied + tensor2 (Tensor): the tensor to be multiplied + +Keyword args: + value (Number, optional): multiplier for :math:`tensor1 .* tensor2` + {out} + +Example:: + + >>> t = torch.randn(1, 3) + >>> t1 = torch.randn(3, 1) + >>> t2 = torch.randn(1, 3) + >>> torch.addcmul(t, t1, t2, value=0.1) + tensor([[-0.8635, -0.6391, 1.6174], + [-0.7617, -0.5879, 1.7388], + [-0.8353, -0.6249, 1.6511]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.addmm, + r""" +addmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + +Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`. +The matrix :attr:`input` is added to the final result. + +If :attr:`mat1` is a :math:`(n \times m)` tensor, :attr:`mat2` is a +:math:`(m \times p)` tensor, then :attr:`input` must be +:ref:`broadcastable ` with a :math:`(n \times p)` tensor +and :attr:`out` will be a :math:`(n \times p)` tensor. + +:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between +:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively. + +.. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat1}_i \mathbin{@} \text{mat2}_i) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and +:attr:`alpha` must be real numbers, otherwise they should be integers. + +This operation has support for arguments with :ref:`sparse layouts`. If +:attr:`input` is sparse the result will have the same layout and if :attr:`out` +is provided it must have the same layout as :attr:`input`. 
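+
+For the dense case, the formula above can be sketched with small deterministic
+inputs (values chosen only for clarity)::
+
+    >>> torch.addmm(torch.zeros(2, 2), torch.eye(2), torch.ones(2, 2))  # illustrative values
+    tensor([[1., 1.],
+            [1., 1.]])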
+ +{sparse_beta_warning} + +{tf32_note} + +{rocm_fp16_note} + +Args: + input (Tensor): matrix to be added + mat1 (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + {out} + +Example:: + + >>> M = torch.randn(2, 3) + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.addmm(M, mat1, mat2) + tensor([[-4.8716, 1.4671, -1.3746], + [ 0.7573, -3.9555, -2.8681]]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes + ), +) + +add_docstr( + torch.adjoint, + r""" +adjoint(Tensor) -> Tensor +Returns a view of the tensor conjugated and with the last two dimensions transposed. + +``x.adjoint()`` is equivalent to ``x.transpose(-2, -1).conj()`` for complex tensors and +to ``x.transpose(-2, -1)`` for real tensors. + +Example:: + >>> x = torch.arange(4, dtype=torch.float) + >>> A = torch.complex(x, x).reshape(2, 2) + >>> A + tensor([[0.+0.j, 1.+1.j], + [2.+2.j, 3.+3.j]]) + >>> A.adjoint() + tensor([[0.-0.j, 2.-2.j], + [1.-1.j, 3.-3.j]]) + >>> (A.adjoint() == A.mH).all() + tensor(True) +""", +) + +add_docstr( + torch.sspaddmm, + r""" +sspaddmm(input, mat1, mat2, *, beta=1, alpha=1, out=None) -> Tensor + +Matrix multiplies a sparse tensor :attr:`mat1` with a dense tensor +:attr:`mat2`, then adds the sparse tensor :attr:`input` to the result. + +Note: This function is equivalent to :func:`torch.addmm`, except +:attr:`input` and :attr:`mat1` are sparse. + +Args: + input (Tensor): a sparse matrix to be added + mat1 (Tensor): a sparse matrix to be matrix multiplied + mat2 (Tensor): a dense matrix to be matrix multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`mat` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\alpha`) + {out} +""".format( + **common_args + ), +) + +add_docstr( + torch.smm, + r""" +smm(input, mat) -> Tensor + +Performs a matrix multiplication of the sparse matrix :attr:`input` +with the dense matrix :attr:`mat`. + +Args: + input (Tensor): a sparse matrix to be matrix multiplied + mat (Tensor): a dense matrix to be matrix multiplied +""", +) + +add_docstr( + torch.addmv, + r""" +addmv(input, mat, vec, *, beta=1, alpha=1, out=None) -> Tensor + +Performs a matrix-vector product of the matrix :attr:`mat` and +the vector :attr:`vec`. +The vector :attr:`input` is added to the final result. + +If :attr:`mat` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of +size `m`, then :attr:`input` must be +:ref:`broadcastable ` with a 1-D tensor of size `n` and +:attr:`out` will be 1-D tensor of size `n`. + +:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between +:attr:`mat` and :attr:`vec` and the added tensor :attr:`input` respectively. + +.. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{mat} \mathbin{@} \text{vec}) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and +:attr:`alpha` must be real numbers, otherwise they should be integers. 
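+
+As a small deterministic illustration of the formula above (values chosen only
+for clarity)::
+
+    >>> torch.addmv(torch.zeros(2), torch.eye(2), torch.tensor([1., 2.]))  # illustrative values
+    tensor([1., 2.])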
+ +Args: + input (Tensor): vector to be added + mat (Tensor): matrix to be matrix multiplied + vec (Tensor): vector to be matrix multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`mat @ vec` (:math:`\alpha`) + {out} + +Example:: + + >>> M = torch.randn(2) + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.addmv(M, mat, vec) + tensor([-0.3768, -5.5565]) +""".format( + **common_args + ), +) + +add_docstr( + torch.addr, + r""" +addr(input, vec1, vec2, *, beta=1, alpha=1, out=None) -> Tensor + +Performs the outer-product of vectors :attr:`vec1` and :attr:`vec2` +and adds it to the matrix :attr:`input`. + +Optional values :attr:`beta` and :attr:`alpha` are scaling factors on the +outer product between :attr:`vec1` and :attr:`vec2` and the added matrix +:attr:`input` respectively. + +.. math:: + \text{out} = \beta\ \text{input} + \alpha\ (\text{vec1} \otimes \text{vec2}) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +If :attr:`vec1` is a vector of size `n` and :attr:`vec2` is a vector +of size `m`, then :attr:`input` must be +:ref:`broadcastable ` with a matrix of size +:math:`(n \times m)` and :attr:`out` will be a matrix of size +:math:`(n \times m)`. + +Args: + input (Tensor): matrix to be added + vec1 (Tensor): the first vector of the outer product + vec2 (Tensor): the second vector of the outer product + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{{vec1}} \otimes \text{{vec2}}` (:math:`\alpha`) + {out} + +Example:: + + >>> vec1 = torch.arange(1., 4.) + >>> vec2 = torch.arange(1., 3.) + >>> M = torch.zeros(3, 2) + >>> torch.addr(M, vec1, vec2) + tensor([[ 1., 2.], + [ 2., 4.], + [ 3., 6.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.allclose, + r""" +allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> bool + +This function checks if :attr:`input` and :attr:`other` satisfy the condition: + +.. math:: + \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert +""" + + r""" +elementwise, for all elements of :attr:`input` and :attr:`other`. The behaviour of this function is analogous to +`numpy.allclose `_ + +Args: + input (Tensor): first tensor to compare + other (Tensor): second tensor to compare + atol (float, optional): absolute tolerance. Default: 1e-08 + rtol (float, optional): relative tolerance. Default: 1e-05 + equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. Default: ``False`` + +Example:: + + >>> torch.allclose(torch.tensor([10000., 1e-07]), torch.tensor([10000.1, 1e-08])) + False + >>> torch.allclose(torch.tensor([10000., 1e-08]), torch.tensor([10000.1, 1e-09])) + True + >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')])) + False + >>> torch.allclose(torch.tensor([1.0, float('nan')]), torch.tensor([1.0, float('nan')]), equal_nan=True) + True +""", +) + +add_docstr( + torch.all, + r""" +all(input) -> Tensor + +Tests if all elements in :attr:`input` evaluate to `True`. + +.. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. 
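+
+A brief sketch of the ``uint8`` behaviour described in the note above (values
+chosen only for clarity)::
+
+    >>> torch.all(torch.tensor([1, 1], dtype=torch.uint8))  # illustrative values
+    tensor(1, dtype=torch.uint8)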
+ +Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.all(a) + tensor(False, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.all(a) + tensor(False) + +.. function:: all(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + +For each row of :attr:`input` in the given dimension :attr:`dim`, +returns `True` if all elements in the row evaluate to `True` and `False` otherwise. + +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(4, 2).bool() + >>> a + tensor([[True, True], + [True, False], + [True, True], + [True, True]], dtype=torch.bool) + >>> torch.all(a, dim=1) + tensor([ True, False, True, True], dtype=torch.bool) + >>> torch.all(a, dim=0) + tensor([ True, False], dtype=torch.bool) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.any, + r""" +any(input) -> Tensor + +Tests if any element in :attr:`input` evaluates to `True`. + +.. note:: This function matches the behaviour of NumPy in returning + output of dtype `bool` for all supported dtypes except `uint8`. + For `uint8` the dtype of output is `uint8` itself. + +Example:: + + >>> a = torch.rand(1, 2).bool() + >>> a + tensor([[False, True]], dtype=torch.bool) + >>> torch.any(a) + tensor(True, dtype=torch.bool) + >>> a = torch.arange(0, 3) + >>> a + tensor([0, 1, 2]) + >>> torch.any(a) + tensor(True) + +.. function:: any(input, dim, keepdim=False, *, out=None) -> Tensor + :noindex: + +For each row of :attr:`input` in the given dimension :attr:`dim`, +returns `True` if any element in the row evaluate to `True` and `False` otherwise. + +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4, 2) < 0 + >>> a + tensor([[ True, True], + [False, True], + [ True, True], + [False, False]]) + >>> torch.any(a, 1) + tensor([ True, True, True, False]) + >>> torch.any(a, 0) + tensor([True, True]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.angle, + r""" +angle(input, *, out=None) -> Tensor + +Computes the element-wise angle (in radians) of the given :attr:`input` tensor. + +.. math:: + \text{out}_{i} = angle(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +.. note:: Starting in PyTorch 1.8, angle returns pi for negative real numbers, + zero for non-negative real numbers, and propagates NaNs. Previously + the function would return zero for all real numbers and not propagate + floating-point NaNs. + +Example:: + + >>> torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159 + tensor([ 135., 135, -45]) +""".format( + **common_args + ), +) + +add_docstr( + torch.as_strided, + r""" +as_strided(input, size, stride, storage_offset=None) -> Tensor + +Create a view of an existing `torch.Tensor` :attr:`input` with specified +:attr:`size`, :attr:`stride` and :attr:`storage_offset`. + +.. warning:: + Prefer using other view functions, like :meth:`torch.Tensor.expand`, + to setting a view's strides manually with `as_strided`, as this + function's behavior depends on the implementation of a tensor's storage. + The constructed view of the storage must only refer to elements within + the storage or a runtime error will be thrown, and if the view is + "overlapped" (with multiple indices referring to the same element in + memory) its behavior is undefined. 
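+
+A small deterministic sketch of how :attr:`size` and :attr:`stride` index into
+the underlying storage (values chosen only for clarity)::
+
+    >>> base = torch.arange(6.)  # illustrative values
+    >>> torch.as_strided(base, (2, 2), (3, 1))
+    tensor([[0., 1.],
+            [3., 4.]])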
+ +Args: + {input} + size (tuple or ints): the shape of the output tensor + stride (tuple or ints): the stride of the output tensor + storage_offset (int, optional): the offset in the underlying storage of the output tensor. + If ``None``, the storage_offset of the output tensor will match the input tensor. + +Example:: + + >>> x = torch.randn(3, 3) + >>> x + tensor([[ 0.9039, 0.6291, 1.0795], + [ 0.1586, 2.1939, -0.4900], + [-0.1909, -0.7503, 1.9355]]) + >>> t = torch.as_strided(x, (2, 2), (1, 2)) + >>> t + tensor([[0.9039, 1.0795], + [0.6291, 0.1586]]) + >>> t = torch.as_strided(x, (2, 2), (1, 2), 1) + tensor([[0.6291, 0.1586], + [1.0795, 2.1939]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.as_tensor, + r""" +as_tensor(data, dtype=None, device=None) -> Tensor + +Converts :attr:`data` into a tensor, sharing data and preserving autograd +history if possible. + +If :attr:`data` is already a tensor with the requested dtype and device +then :attr:`data` itself is returned, but if :attr:`data` is a +tensor with a different dtype or device then it's copied as if using +`data.to(dtype=dtype, device=device)`. + +If :attr:`data` is a NumPy array (an ndarray) with the same dtype and device then a +tensor is constructed using :func:`torch.from_numpy`. + +.. seealso:: + + :func:`torch.tensor` never shares its data and creates a new "leaf tensor" (see :doc:`/notes/autograd`). + + +Args: + {data} + {dtype} + device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor + then the device of data is used. If None and data is not a tensor then + the result tensor is constructed on the current device. + + +Example:: + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.as_tensor(a) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.as_tensor(a, device=torch.device('cuda')) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([1, 2, 3]) +""".format( + **factory_data_common_args + ), +) + +add_docstr( + torch.asin, + r""" +asin(input, *, out=None) -> Tensor + +Returns a new tensor with the arcsine of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \sin^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5962, 1.4985, -0.4396, 1.4525]) + >>> torch.asin(a) + tensor([-0.6387, nan, -0.4552, nan]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arcsin, + r""" +arcsin(input, *, out=None) -> Tensor + +Alias for :func:`torch.asin`. +""", +) + +add_docstr( + torch.asinh, + r""" +asinh(input, *, out=None) -> Tensor + +Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \sinh^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.1606, -1.4267, -1.0899, -1.0250 ]) + >>> torch.asinh(a) + tensor([ 0.1599, -1.1534, -0.9435, -0.8990 ]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arcsinh, + r""" +arcsinh(input, *, out=None) -> Tensor + +Alias for :func:`torch.asinh`. +""", +) + +add_docstr( + torch.atan, + r""" +atan(input, *, out=None) -> Tensor + +Returns a new tensor with the arctangent of the elements of :attr:`input`. + +.. 
math:: + \text{out}_{i} = \tan^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.2341, 0.2539, -0.6256, -0.6448]) + >>> torch.atan(a) + tensor([ 0.2299, 0.2487, -0.5591, -0.5727]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arctan, + r""" +arctan(input, *, out=None) -> Tensor + +Alias for :func:`torch.atan`. +""", +) + +add_docstr( + torch.atan2, + r""" +atan2(input, other, *, out=None) -> Tensor + +Element-wise arctangent of :math:`\text{{input}}_{{i}} / \text{{other}}_{{i}}` +with consideration of the quadrant. Returns a new tensor with the signed angles +in radians between vector :math:`(\text{{other}}_{{i}}, \text{{input}}_{{i}})` +and vector :math:`(1, 0)`. (Note that :math:`\text{{other}}_{{i}}`, the second +parameter, is the x-coordinate, while :math:`\text{{input}}_{{i}}`, the first +parameter, is the y-coordinate.) + +The shapes of ``input`` and ``other`` must be +:ref:`broadcastable `. + +Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.9041, 0.0196, -0.3108, -2.4423]) + >>> torch.atan2(a, torch.randn(4)) + tensor([ 0.9833, 0.0811, -1.9743, -1.4151]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arctan2, + r""" +arctan2(input, other, *, out=None) -> Tensor +Alias for :func:`torch.atan2`. +""", +) + +add_docstr( + torch.atanh, + r""" +atanh(input, *, out=None) -> Tensor + +Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`. + +Note: + The domain of the inverse hyperbolic tangent is `(-1, 1)` and values outside this range + will be mapped to ``NaN``, except for the values `1` and `-1` for which the output is + mapped to `+/-INF` respectively. + +.. math:: + \text{out}_{i} = \tanh^{-1}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.randn(4).uniform_(-1, 1) + >>> a + tensor([ -0.9385, 0.2968, -0.8591, -0.1871 ]) + >>> torch.atanh(a) + tensor([ -1.7253, 0.3060, -1.2899, -0.1893 ]) +""".format( + **common_args + ), +) + +add_docstr( + torch.arctanh, + r""" +arctanh(input, *, out=None) -> Tensor + +Alias for :func:`torch.atanh`. +""", +) + +add_docstr( + torch.asarray, + r""" +asarray(obj, *, dtype=None, device=None, copy=None, requires_grad=False) -> Tensor + +Converts :attr:`obj` to a tensor. + +:attr:`obj` can be one of: + +1. a tensor +2. a NumPy array or a NumPy scalar +3. a DLPack capsule +4. an object that implements Python's buffer protocol +5. a scalar +6. a sequence of scalars + +When :attr:`obj` is a tensor, NumPy array, or DLPack capsule the returned tensor will, +by default, not require a gradient, have the same datatype as :attr:`obj`, be on the +same device, and share memory with it. These properties can be controlled with the +:attr:`dtype`, :attr:`device`, :attr:`copy`, and :attr:`requires_grad` keyword arguments. +If the returned tensor is of a different datatype, on a different device, or a copy is +requested then it will not share its memory with :attr:`obj`. If :attr:`requires_grad` +is ``True`` then the returned tensor will require a gradient, and if :attr:`obj` is +also a tensor with an autograd history then the returned tensor will have the same history. 
+ +When :attr:`obj` is not a tensor, NumPy array, or DLPack capsule but implements Python's +buffer protocol then the buffer is interpreted as an array of bytes grouped according to +the size of the datatype passed to the :attr:`dtype` keyword argument. (If no datatype is +passed then the default floating point datatype is used, instead.) The returned tensor +will have the specified datatype (or default floating point datatype if none is specified) +and, by default, be on the CPU device and share memory with the buffer. + +When :attr:`obj` is a NumPy scalar, the returned tensor will be a 0-dimensional tensor on +the CPU and that doesn't share its memory (i.e. ``copy=True``). By default datatype will +be the PyTorch datatype corresponding to the NumPy's scalar's datatype. + +When :attr:`obj` is none of the above but a scalar, or a sequence of scalars then the +returned tensor will, by default, infer its datatype from the scalar values, be on the +current default device, and not share its memory. + +.. seealso:: + + :func:`torch.tensor` creates a tensor that always copies the data from the input object. + :func:`torch.from_numpy` creates a tensor that always shares memory from NumPy arrays. + :func:`torch.frombuffer` creates a tensor that always shares memory from objects that + implement the buffer protocol. + :func:`torch.from_dlpack` creates a tensor that always shares memory from + DLPack capsules. + +Args: + obj (object): a tensor, NumPy array, DLPack Capsule, object that implements Python's + buffer protocol, scalar, or sequence of scalars. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the datatype of the returned tensor. + Default: ``None``, which causes the datatype of the returned tensor to be + inferred from :attr:`obj`. + copy (bool, optional): controls whether the returned tensor shares memory with :attr:`obj`. + Default: ``None``, which causes the returned tensor to share memory with :attr:`obj` + whenever possible. If ``True`` then the returned tensor does not share its memory. + If ``False`` then the returned tensor shares its memory with :attr:`obj` and an + error is thrown if it cannot. + device (:class:`torch.device`, optional): the device of the returned tensor. + Default: ``None``, which causes the device of :attr:`obj` to be used. Or, if + :attr:`obj` is a Python sequence, the current default device will be used. + requires_grad (bool, optional): whether the returned tensor requires grad. + Default: ``False``, which causes the returned tensor not to require a gradient. + If ``True``, then the returned tensor will require a gradient, and if :attr:`obj` + is also a tensor with an autograd history then the returned tensor will have + the same history. 
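+
+A minimal sketch of the buffer-protocol path described above, using Python's
+standard-library ``array`` module purely for illustration::
+
+    >>> import array
+    >>> buf = array.array('f', [1., 2., 3.])  # illustrative values
+    >>> torch.asarray(buf, dtype=torch.float32)
+    tensor([1., 2., 3.])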
+ +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> # Shares memory with tensor 'a' + >>> b = torch.asarray(a) + >>> a.data_ptr() == b.data_ptr() + True + >>> # Forces memory copy + >>> c = torch.asarray(a, copy=True) + >>> a.data_ptr() == c.data_ptr() + False + + >>> a = torch.tensor([1., 2., 3.], requires_grad=True) + >>> b = a + 2 + >>> b + tensor([3., 4., 5.], grad_fn=) + >>> # Shares memory with tensor 'b', with no grad + >>> c = torch.asarray(b) + >>> c + tensor([3., 4., 5.]) + >>> # Shares memory with tensor 'b', retaining autograd history + >>> d = torch.asarray(b, requires_grad=True) + >>> d + tensor([3., 4., 5.], grad_fn=) + + >>> array = numpy.array([1, 2, 3]) + >>> # Shares memory with array 'array' + >>> t1 = torch.asarray(array) + >>> array.__array_interface__['data'][0] == t1.data_ptr() + True + >>> # Copies memory due to dtype mismatch + >>> t2 = torch.asarray(array, dtype=torch.float32) + >>> array.__array_interface__['data'][0] == t2.data_ptr() + False + + >>> scalar = numpy.float64(0.5) + >>> torch.asarray(scalar) + tensor(0.5000, dtype=torch.float64) +""", +) + +add_docstr( + torch.baddbmm, + r""" +baddbmm(input, batch1, batch2, *, beta=1, alpha=1, out=None) -> Tensor + +Performs a batch matrix-matrix product of matrices in :attr:`batch1` +and :attr:`batch2`. +:attr:`input` is added to the final result. + +:attr:`batch1` and :attr:`batch2` must be 3-D tensors each containing the same +number of matrices. + +If :attr:`batch1` is a :math:`(b \times n \times m)` tensor, :attr:`batch2` is a +:math:`(b \times m \times p)` tensor, then :attr:`input` must be +:ref:`broadcastable ` with a +:math:`(b \times n \times p)` tensor and :attr:`out` will be a +:math:`(b \times n \times p)` tensor. Both :attr:`alpha` and :attr:`beta` mean the +same as the scaling factors used in :meth:`torch.addbmm`. + +.. math:: + \text{out}_i = \beta\ \text{input}_i + \alpha\ (\text{batch1}_i \mathbin{@} \text{batch2}_i) + +If :attr:`beta` is 0, then :attr:`input` will be ignored, and `nan` and `inf` in +it will not be propagated. +""" + + r""" +For inputs of type `FloatTensor` or `DoubleTensor`, arguments :attr:`beta` and +:attr:`alpha` must be real numbers, otherwise they should be integers. + +{tf32_note} + +{rocm_fp16_note} + +Args: + input (Tensor): the tensor to be added + batch1 (Tensor): the first batch of matrices to be multiplied + batch2 (Tensor): the second batch of matrices to be multiplied + +Keyword args: + beta (Number, optional): multiplier for :attr:`input` (:math:`\beta`) + alpha (Number, optional): multiplier for :math:`\text{{batch1}} \mathbin{{@}} \text{{batch2}}` (:math:`\alpha`) + {out} + +Example:: + + >>> M = torch.randn(10, 3, 5) + >>> batch1 = torch.randn(10, 3, 4) + >>> batch2 = torch.randn(10, 4, 5) + >>> torch.baddbmm(M, batch1, batch2).size() + torch.Size([10, 3, 5]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes + ), +) + +add_docstr( + torch.bernoulli, + r""" +bernoulli(input, *, generator=None, out=None) -> Tensor + +Draws binary random numbers (0 or 1) from a Bernoulli distribution. + +The :attr:`input` tensor should be a tensor containing probabilities +to be used for drawing the binary random number. +Hence, all values in :attr:`input` have to be in the range: +:math:`0 \leq \text{input}_i \leq 1`. + +The :math:`\text{i}^{th}` element of the output tensor will draw a +value :math:`1` according to the :math:`\text{i}^{th}` probability value given +in :attr:`input`. + +.. 
math:: + \text{out}_{i} \sim \mathrm{Bernoulli}(p = \text{input}_{i}) +""" + + r""" +The returned :attr:`out` tensor only has values 0 or 1 and is of the same +shape as :attr:`input`. + +:attr:`out` can have integral ``dtype``, but :attr:`input` must have floating +point ``dtype``. + +Args: + input (Tensor): the input tensor of probability values for the Bernoulli distribution + +Keyword args: + {generator} + {out} + +Example:: + + >>> a = torch.empty(3, 3).uniform_(0, 1) # generate a uniform random matrix with range [0, 1] + >>> a + tensor([[ 0.1737, 0.0950, 0.3609], + [ 0.7148, 0.0289, 0.2676], + [ 0.9456, 0.8937, 0.7202]]) + >>> torch.bernoulli(a) + tensor([[ 1., 0., 0.], + [ 0., 0., 0.], + [ 1., 1., 1.]]) + + >>> a = torch.ones(3, 3) # probability of drawing "1" is 1 + >>> torch.bernoulli(a) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.], + [ 1., 1., 1.]]) + >>> a = torch.zeros(3, 3) # probability of drawing "1" is 0 + >>> torch.bernoulli(a) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.], + [ 0., 0., 0.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.bincount, + r""" +bincount(input, weights=None, minlength=0) -> Tensor + +Count the frequency of each value in an array of non-negative ints. + +The number of bins (size 1) is one larger than the largest value in +:attr:`input` unless :attr:`input` is empty, in which case the result is a +tensor of size 0. If :attr:`minlength` is specified, the number of bins is at least +:attr:`minlength` and if :attr:`input` is empty, then the result is tensor of size +:attr:`minlength` filled with zeros. If ``n`` is the value at position ``i``, +``out[n] += weights[i]`` if :attr:`weights` is specified else +``out[n] += 1``. + +Note: + {backward_reproducibility_note} + +Arguments: + input (Tensor): 1-d int tensor + weights (Tensor): optional, weight for each value in the input tensor. + Should be of same size as input tensor. + minlength (int): optional, minimum number of bins. Should be non-negative. + +Returns: + output (Tensor): a tensor of shape ``Size([max(input) + 1])`` if + :attr:`input` is non-empty, else ``Size(0)`` + +Example:: + + >>> input = torch.randint(0, 8, (5,), dtype=torch.int64) + >>> weights = torch.linspace(0, 1, steps=5) + >>> input, weights + (tensor([4, 3, 6, 3, 4]), + tensor([ 0.0000, 0.2500, 0.5000, 0.7500, 1.0000]) + + >>> torch.bincount(input) + tensor([0, 0, 0, 2, 2, 0, 1]) + + >>> input.bincount(weights) + tensor([0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 0.0000, 0.5000]) +""".format( + **reproducibility_notes + ), +) + +add_docstr( + torch.bitwise_not, + r""" +bitwise_not(input, *, out=None) -> Tensor + +Computes the bitwise NOT of the given input tensor. The input tensor must be of +integral or Boolean types. For bool tensors, it computes the logical NOT. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_not(torch.tensor([-1, -2, 3], dtype=torch.int8)) + tensor([ 0, 1, -4], dtype=torch.int8) +""".format( + **common_args + ), +) + +add_docstr( + torch.bmm, + r""" +bmm(input, mat2, *, out=None) -> Tensor + +Performs a batch matrix-matrix product of matrices stored in :attr:`input` +and :attr:`mat2`. + +:attr:`input` and :attr:`mat2` must be 3-D tensors each containing +the same number of matrices. + +If :attr:`input` is a :math:`(b \times n \times m)` tensor, :attr:`mat2` is a +:math:`(b \times m \times p)` tensor, :attr:`out` will be a +:math:`(b \times n \times p)` tensor. + +.. 
math:: + \text{out}_i = \text{input}_i \mathbin{@} \text{mat2}_i +""" + + r""" +{tf32_note} + +{rocm_fp16_note} + +.. note:: This function does not :ref:`broadcast `. + For broadcasting matrix products, see :func:`torch.matmul`. + +Args: + input (Tensor): the first batch of matrices to be multiplied + mat2 (Tensor): the second batch of matrices to be multiplied + +Keyword Args: + {out} + +Example:: + + >>> input = torch.randn(10, 3, 4) + >>> mat2 = torch.randn(10, 4, 5) + >>> res = torch.bmm(input, mat2) + >>> res.size() + torch.Size([10, 3, 5]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes + ), +) + +add_docstr( + torch.bitwise_and, + r""" +bitwise_and(input, other, *, out=None) -> Tensor + +Computes the bitwise AND of :attr:`input` and :attr:`other`. The input tensor must be of +integral or Boolean types. For bool tensors, it computes the logical AND. + +Args: + input: the first input tensor + other: the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_and(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([1, 0, 3], dtype=torch.int8) + >>> torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.bitwise_or, + r""" +bitwise_or(input, other, *, out=None) -> Tensor + +Computes the bitwise OR of :attr:`input` and :attr:`other`. The input tensor must be of +integral or Boolean types. For bool tensors, it computes the logical OR. + +Args: + input: the first input tensor + other: the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_or(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -2, 3], dtype=torch.int8) + >>> torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.bitwise_xor, + r""" +bitwise_xor(input, other, *, out=None) -> Tensor + +Computes the bitwise XOR of :attr:`input` and :attr:`other`. The input tensor must be of +integral or Boolean types. For bool tensors, it computes the logical XOR. + +Args: + input: the first input tensor + other: the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_xor(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 0], dtype=torch.int8) + >>> torch.bitwise_xor(torch.tensor([True, True, False]), torch.tensor([False, True, False])) + tensor([ True, False, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.bitwise_left_shift, + r""" +bitwise_left_shift(input, other, *, out=None) -> Tensor + +Computes the left arithmetic shift of :attr:`input` by :attr:`other` bits. +The input tensor must be of integral type. This operator supports +:ref:`broadcasting to a common shape ` and +:ref:`type promotion `. + +The operation applied is: + +.. 
math:: + \text{{out}}_i = \text{{input}}_i << \text{{other}}_i + +Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_left_shift(torch.tensor([-1, -2, 3], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-2, -2, 24], dtype=torch.int8) +""".format( + **common_args + ), +) + +add_docstr( + torch.bitwise_right_shift, + r""" +bitwise_right_shift(input, other, *, out=None) -> Tensor + +Computes the right arithmetic shift of :attr:`input` by :attr:`other` bits. +The input tensor must be of integral type. This operator supports +:ref:`broadcasting to a common shape ` and +:ref:`type promotion `. +In any case, if the value of the right operand is negative or is greater +or equal to the number of bits in the promoted left operand, the behavior is undefined. + +The operation applied is: + +.. math:: + \text{{out}}_i = \text{{input}}_i >> \text{{other}}_i + +Args: + input (Tensor or Scalar): the first input tensor + other (Tensor or Scalar): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> torch.bitwise_right_shift(torch.tensor([-2, -7, 31], dtype=torch.int8), torch.tensor([1, 0, 3], dtype=torch.int8)) + tensor([-1, -7, 3], dtype=torch.int8) +""".format( + **common_args + ), +) + +add_docstr( + torch.broadcast_to, + r""" +broadcast_to(input, shape) -> Tensor + +Broadcasts :attr:`input` to the shape :attr:`\shape`. +Equivalent to calling ``input.expand(shape)``. See :meth:`~Tensor.expand` for details. + +Args: + {input} + shape (list, tuple, or :class:`torch.Size`): the new shape. + +Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> torch.broadcast_to(x, (3, 3)) + tensor([[1, 2, 3], + [1, 2, 3], + [1, 2, 3]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.stack, + r""" +stack(tensors, dim=0, *, out=None) -> Tensor + +Concatenates a sequence of tensors along a new dimension. + +All tensors need to be of the same size. + +.. seealso:: + + :func:`torch.cat` concatenates the given sequence along an existing dimension. + +Arguments: + tensors (sequence of Tensors): sequence of tensors to concatenate + dim (int, optional): dimension to insert. Has to be between 0 and the number + of dimensions of concatenated tensors (inclusive). Default: 0 + +Keyword args: + {out} + +Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.3367, 0.1288, 0.2345], + [ 0.2303, -1.1229, -0.1863]]) + >>> x = torch.stack((x, x)) # same as torch.stack((x, x), dim=0) + >>> x + tensor([[[ 0.3367, 0.1288, 0.2345], + [ 0.2303, -1.1229, -0.1863]], + + [[ 0.3367, 0.1288, 0.2345], + [ 0.2303, -1.1229, -0.1863]]]) + >>> x.size() + torch.Size([2, 2, 3]) + >>> x = torch.stack((x, x), dim=1) + tensor([[[ 0.3367, 0.1288, 0.2345], + [ 0.3367, 0.1288, 0.2345]], + + [[ 0.2303, -1.1229, -0.1863], + [ 0.2303, -1.1229, -0.1863]]]) + >>> x = torch.stack((x, x), dim=2) + tensor([[[ 0.3367, 0.3367], + [ 0.1288, 0.1288], + [ 0.2345, 0.2345]], + + [[ 0.2303, 0.2303], + [-1.1229, -1.1229], + [-0.1863, -0.1863]]]) + >>> x = torch.stack((x, x), dim=-1) + tensor([[[ 0.3367, 0.3367], + [ 0.1288, 0.1288], + [ 0.2345, 0.2345]], + + [[ 0.2303, 0.2303], + [-1.1229, -1.1229], + [-0.1863, -0.1863]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.hstack, + r""" +hstack(tensors, *, out=None) -> Tensor + +Stack tensors in sequence horizontally (column wise). 
+ +This is equivalent to concatenation along the first axis for 1-D tensors, and along the second axis for all other tensors. + +Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.hstack((a,b)) + tensor([1, 2, 3, 4, 5, 6]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.hstack((a,b)) + tensor([[1, 4], + [2, 5], + [3, 6]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.vstack, + r""" +vstack(tensors, *, out=None) -> Tensor + +Stack tensors in sequence vertically (row wise). + +This is equivalent to concatenation along the first axis after all 1-D tensors have been reshaped by :func:`torch.atleast_2d`. + +Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.vstack((a,b)) + tensor([[1, 2, 3], + [4, 5, 6]]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.vstack((a,b)) + tensor([[1], + [2], + [3], + [4], + [5], + [6]]) + + +""".format( + **common_args + ), +) + +add_docstr( + torch.dstack, + r""" +dstack(tensors, *, out=None) -> Tensor + +Stack tensors in sequence depthwise (along third axis). + +This is equivalent to concatenation along the third axis after 1-D and 2-D tensors have been reshaped by :func:`torch.atleast_3d`. + +Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.dstack((a,b)) + tensor([[[1, 4], + [2, 5], + [3, 6]]]) + >>> a = torch.tensor([[1],[2],[3]]) + >>> b = torch.tensor([[4],[5],[6]]) + >>> torch.dstack((a,b)) + tensor([[[1, 4]], + [[2, 5]], + [[3, 6]]]) + + +""".format( + **common_args + ), +) + +add_docstr( + torch.tensor_split, + r""" +tensor_split(input, indices_or_sections, dim=0) -> List of Tensors + +Splits a tensor into multiple sub-tensors, all of which are views of :attr:`input`, +along dimension :attr:`dim` according to the indices or number of sections specified +by :attr:`indices_or_sections`. This function is based on NumPy's +:func:`numpy.array_split`. + +Args: + input (Tensor): the tensor to split + indices_or_sections (Tensor, int or list or tuple of ints): + If :attr:`indices_or_sections` is an integer ``n`` or a zero dimensional long tensor + with value ``n``, :attr:`input` is split into ``n`` sections along dimension :attr:`dim`. + If :attr:`input` is divisible by ``n`` along dimension :attr:`dim`, each + section will be of equal size, :code:`input.size(dim) / n`. If :attr:`input` + is not divisible by ``n``, the sizes of the first :code:`int(input.size(dim) % n)` + sections will have size :code:`int(input.size(dim) / n) + 1`, and the rest will + have size :code:`int(input.size(dim) / n)`. + + If :attr:`indices_or_sections` is a list or tuple of ints, or a one-dimensional long + tensor, then :attr:`input` is split along dimension :attr:`dim` at each of the indices + in the list, tuple or tensor. For instance, :code:`indices_or_sections=[2, 3]` and :code:`dim=0` + would result in the tensors :code:`input[:2]`, :code:`input[2:3]`, and :code:`input[3:]`. + + If :attr:`indices_or_sections` is a tensor, it must be a zero-dimensional or one-dimensional + long tensor on the CPU. + + dim (int, optional): dimension along which to split the tensor. 
Default: ``0`` + +Example:: + + >>> x = torch.arange(8) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4, 5]), tensor([6, 7])) + + >>> x = torch.arange(7) + >>> torch.tensor_split(x, 3) + (tensor([0, 1, 2]), tensor([3, 4]), tensor([5, 6])) + >>> torch.tensor_split(x, (1, 6)) + (tensor([0]), tensor([1, 2, 3, 4, 5]), tensor([6])) + + >>> x = torch.arange(14).reshape(2, 7) + >>> x + tensor([[ 0, 1, 2, 3, 4, 5, 6], + [ 7, 8, 9, 10, 11, 12, 13]]) + >>> torch.tensor_split(x, 3, dim=1) + (tensor([[0, 1, 2], + [7, 8, 9]]), + tensor([[ 3, 4], + [10, 11]]), + tensor([[ 5, 6], + [12, 13]])) + >>> torch.tensor_split(x, (1, 6), dim=1) + (tensor([[0], + [7]]), + tensor([[ 1, 2, 3, 4, 5], + [ 8, 9, 10, 11, 12]]), + tensor([[ 6], + [13]])) +""", +) + +add_docstr( + torch.chunk, + r""" +chunk(input, chunks, dim=0) -> List of Tensors + +Attempts to split a tensor into the specified number of chunks. Each chunk is a view of +the input tensor. + + +.. note:: + + This function may return fewer than the specified number of chunks! + +.. seealso:: + + :func:`torch.tensor_split` a function that always returns exactly the specified number of chunks + +If the tensor size along the given dimension :attr:`dim` is divisible by :attr:`chunks`, +all returned chunks will be the same size. +If the tensor size along the given dimension :attr:`dim` is not divisible by :attr:`chunks`, +all returned chunks will be the same size, except the last one. +If such division is not possible, this function may return fewer +than the specified number of chunks. + +Arguments: + input (Tensor): the tensor to split + chunks (int): number of chunks to return + dim (int): dimension along which to split the tensor + +Example: + >>> torch.arange(11).chunk(6) + (tensor([0, 1]), + tensor([2, 3]), + tensor([4, 5]), + tensor([6, 7]), + tensor([8, 9]), + tensor([10])) + >>> torch.arange(12).chunk(6) + (tensor([0, 1]), + tensor([2, 3]), + tensor([4, 5]), + tensor([6, 7]), + tensor([8, 9]), + tensor([10, 11])) + >>> torch.arange(13).chunk(6) + (tensor([0, 1, 2]), + tensor([3, 4, 5]), + tensor([6, 7, 8]), + tensor([ 9, 10, 11]), + tensor([12])) +""", +) + +add_docstr( + torch.unsafe_chunk, + r""" +unsafe_chunk(input, chunks, dim=0) -> List of Tensors + +Works like :func:`torch.chunk` but without enforcing the autograd restrictions +on inplace modification of the outputs. + +.. warning:: + This function is safe to use as long as only the input, or only the outputs + are modified inplace after calling this function. It is user's + responsibility to ensure that is the case. If both the input and one or more + of the outputs are modified inplace, gradients computed by autograd will be + silently incorrect. +""", +) + +add_docstr( + torch.unsafe_split, + r""" +unsafe_split(tensor, split_size_or_sections, dim=0) -> List of Tensors + +Works like :func:`torch.split` but without enforcing the autograd restrictions +on inplace modification of the outputs. + +.. warning:: + This function is safe to use as long as only the input, or only the outputs + are modified inplace after calling this function. It is user's + responsibility to ensure that is the case. If both the input and one or more + of the outputs are modified inplace, gradients computed by autograd will be + silently incorrect. +""", +) + +add_docstr( + torch.hsplit, + r""" +hsplit(input, indices_or_sections) -> List of Tensors + +Splits :attr:`input`, a tensor with one or more dimensions, into multiple tensors +horizontally according to :attr:`indices_or_sections`. 
Each split is a view of +:attr:`input`. + +If :attr:`input` is one dimensional this is equivalent to calling +torch.tensor_split(input, indices_or_sections, dim=0) (the split dimension is +zero), and if :attr:`input` has two or more dimensions it's equivalent to calling +torch.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), +except that if :attr:`indices_or_sections` is an integer it must evenly divide +the split dimension or a runtime error will be thrown. + +This function is based on NumPy's :func:`numpy.hsplit`. + +Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + +Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.hsplit(t, 2) + (tensor([[ 0., 1.], + [ 4., 5.], + [ 8., 9.], + [12., 13.]]), + tensor([[ 2., 3.], + [ 6., 7.], + [10., 11.], + [14., 15.]])) + >>> torch.hsplit(t, [3, 6]) + (tensor([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + tensor([[ 3.], + [ 7.], + [11.], + [15.]]), + tensor([], size=(4, 0))) + +""", +) + +add_docstr( + torch.vsplit, + r""" +vsplit(input, indices_or_sections) -> List of Tensors + +Splits :attr:`input`, a tensor with two or more dimensions, into multiple tensors +vertically according to :attr:`indices_or_sections`. Each split is a view of +:attr:`input`. + +This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=0) +(the split dimension is 0), except that if :attr:`indices_or_sections` is an integer +it must evenly divide the split dimension or a runtime error will be thrown. + +This function is based on NumPy's :func:`numpy.vsplit`. + +Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. + +Example:: + >>> t = torch.arange(16.0).reshape(4,4) + >>> t + tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) + >>> torch.vsplit(t, 2) + (tensor([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), + tensor([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])) + >>> torch.vsplit(t, [3, 6]) + (tensor([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), + tensor([[12., 13., 14., 15.]]), + tensor([], size=(0, 4))) + +""", +) + +add_docstr( + torch.dsplit, + r""" +dsplit(input, indices_or_sections) -> List of Tensors + +Splits :attr:`input`, a tensor with three or more dimensions, into multiple tensors +depthwise according to :attr:`indices_or_sections`. Each split is a view of +:attr:`input`. + +This is equivalent to calling torch.tensor_split(input, indices_or_sections, dim=2) +(the split dimension is 2), except that if :attr:`indices_or_sections` is an integer +it must evenly divide the split dimension or a runtime error will be thrown. + +This function is based on NumPy's :func:`numpy.dsplit`. + +Args: + input (Tensor): tensor to split. + indices_or_sections (int or list or tuple of ints): See argument in :func:`torch.tensor_split`. 
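+
+A minimal sketch of the even-division requirement mentioned above (values
+chosen only for clarity; the commented call would raise a runtime error)::
+
+    t = torch.arange(16.0).reshape(2, 2, 4)
+    torch.dsplit(t, 2)    # fine: 2 evenly divides t.size(2) == 4
+    # torch.dsplit(t, 3)  # runtime error: 3 does not evenly divide t.size(2) == 4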
+ +Example:: + >>> t = torch.arange(16.0).reshape(2, 2, 4) + >>> t + tensor([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) + >>> torch.dsplit(t, 2) + (tensor([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), + tensor([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])) + + >>> torch.dsplit(t, [3, 6]) + (tensor([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + tensor([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + tensor([], size=(2, 2, 0))) + +""", +) + +add_docstr( + torch.can_cast, + r""" +can_cast(from, to) -> bool + +Determines if a type conversion is allowed under PyTorch casting rules +described in the type promotion :ref:`documentation `. + +Args: + from (dtype): The original :class:`torch.dtype`. + to (dtype): The target :class:`torch.dtype`. + +Example:: + + >>> torch.can_cast(torch.double, torch.float) + True + >>> torch.can_cast(torch.float, torch.int) + False +""", +) + +add_docstr( + torch.corrcoef, + r""" +corrcoef(input) -> Tensor + +Estimates the Pearson product-moment correlation coefficient matrix of the variables given by the :attr:`input` matrix, +where rows are the variables and columns are the observations. + +.. note:: + + The correlation coefficient matrix R is computed using the covariance matrix C as given by + :math:`R_{ij} = \frac{ C_{ij} } { \sqrt{ C_{ii} * C_{jj} } }` + +.. note:: + + Due to floating point rounding, the resulting array may not be Hermitian and its diagonal elements may not be 1. + The real and imaginary values are clipped to the interval [-1, 1] in an attempt to improve this situation. + +Args: + input (Tensor): A 2D matrix containing multiple variables and observations, or a + Scalar or 1D vector representing a single variable. + +Returns: + (Tensor) The correlation coefficient matrix of the variables. + +.. seealso:: + + :func:`torch.cov` covariance matrix. + +Example:: + + >>> x = torch.tensor([[0, 1, 2], [2, 1, 0]]) + >>> torch.corrcoef(x) + tensor([[ 1., -1.], + [-1., 1.]]) + >>> x = torch.randn(2, 4) + >>> x + tensor([[-0.2678, -0.0908, -0.3766, 0.2780], + [-0.5812, 0.1535, 0.2387, 0.2350]]) + >>> torch.corrcoef(x) + tensor([[1.0000, 0.3582], + [0.3582, 1.0000]]) + >>> torch.corrcoef(x[0]) + tensor(1.) +""", +) + +add_docstr( + torch.cov, + r""" +cov(input, *, correction=1, fweights=None, aweights=None) -> Tensor + +Estimates the covariance matrix of the variables given by the :attr:`input` matrix, where rows are +the variables and columns are the observations. + +A covariance matrix is a square matrix giving the covariance of each pair of variables. The diagonal contains +the variance of each variable (covariance of a variable with itself). By definition, if :attr:`input` represents +a single variable (Scalar or 1D) then its variance is returned. + +The sample covariance of the variables :math:`x` and :math:`y` is given by: + +.. math:: + \text{cov}(x,y) = \frac{\sum^{N}_{i = 1}(x_{i} - \bar{x})(y_{i} - \bar{y})}{\max(0,~N~-~\delta N)} + +where :math:`\bar{x}` and :math:`\bar{y}` are the simple means of the :math:`x` and :math:`y` respectively, and +:math:`\delta N` is the :attr:`correction`. + +If :attr:`fweights` and/or :attr:`aweights` are provided, the weighted covariance +is calculated, which is given by: + +.. 
math:: + \text{cov}_w(x,y) = \frac{\sum^{N}_{i = 1}w_i(x_{i} - \mu_x^*)(y_{i} - \mu_y^*)} + {\max(0,~\sum^{N}_{i = 1}w_i~-~\frac{\sum^{N}_{i = 1}w_ia_i}{\sum^{N}_{i = 1}w_i}~\delta N)} + +where :math:`w` denotes :attr:`fweights` or :attr:`aweights` (``f`` and ``a`` for brevity) based on whichever is +provided, or :math:`w = f \times a` if both are provided, and +:math:`\mu_x^* = \frac{\sum^{N}_{i = 1}w_ix_{i} }{\sum^{N}_{i = 1}w_i}` is the weighted mean of the variable. If not +provided, ``f`` and/or ``a`` can be seen as a :math:`\mathbb{1}` vector of appropriate size. + +Args: + input (Tensor): A 2D matrix containing multiple variables and observations, or a + Scalar or 1D vector representing a single variable. + +Keyword Args: + correction (int, optional): difference between the sample size and sample degrees of freedom. + Defaults to Bessel's correction, ``correction = 1`` which returns the unbiased estimate, + even if both :attr:`fweights` and :attr:`aweights` are specified. ``correction = 0`` + will return the simple average. Defaults to ``1``. + fweights (tensor, optional): A Scalar or 1D tensor of observation vector frequencies representing the number of + times each observation should be repeated. Its numel must equal the number of columns of :attr:`input`. + Must have integral dtype. Ignored if ``None``. Defaults to ``None``. + aweights (tensor, optional): A Scalar or 1D array of observation vector weights. + These relative weights are typically large for observations considered “important” and smaller for + observations considered less “important”. Its numel must equal the number of columns of :attr:`input`. + Must have floating point dtype. Ignored if ``None``. Defaults to ``None``. + +Returns: + (Tensor) The covariance matrix of the variables. + +.. seealso:: + + :func:`torch.corrcoef` normalized covariance matrix. + +Example:: + >>> x = torch.tensor([[0, 2], [1, 1], [2, 0]]).T + >>> x + tensor([[0, 1, 2], + [2, 1, 0]]) + >>> torch.cov(x) + tensor([[ 1., -1.], + [-1., 1.]]) + >>> torch.cov(x, correction=0) + tensor([[ 0.6667, -0.6667], + [-0.6667, 0.6667]]) + >>> fw = torch.randint(1, 10, (3,)) + >>> fw + tensor([1, 6, 9]) + >>> aw = torch.rand(3) + >>> aw + tensor([0.4282, 0.0255, 0.4144]) + >>> torch.cov(x, fweights=fw, aweights=aw) + tensor([[ 0.4169, -0.4169], + [-0.4169, 0.4169]]) +""", +) + +add_docstr( + torch.cat, + r""" +cat(tensors, dim=0, *, out=None) -> Tensor + +Concatenates the given sequence of :attr:`seq` tensors in the given dimension. +All tensors must either have the same shape (except in the concatenating +dimension) or be a 1-D empty tensor with size ``(0,)``. + +:func:`torch.cat` can be seen as an inverse operation for :func:`torch.split` +and :func:`torch.chunk`. + +:func:`torch.cat` can be best understood via examples. + +.. seealso:: + + :func:`torch.stack` concatenates the given sequence along a new dimension. + +Args: + tensors (sequence of Tensors): any python sequence of tensors of the same type. + Non-empty tensors provided must have the same shape, except in the + cat dimension. 
+ dim (int, optional): the dimension over which the tensors are concatenated + +Keyword args: + {out} + +Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 0) + tensor([[ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497], + [ 0.6580, -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497]]) + >>> torch.cat((x, x, x), 1) + tensor([[ 0.6580, -1.0969, -0.4614, 0.6580, -1.0969, -0.4614, 0.6580, + -1.0969, -0.4614], + [-0.1034, -0.5790, 0.1497, -0.1034, -0.5790, 0.1497, -0.1034, + -0.5790, 0.1497]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.concat, + r""" +concat(tensors, dim=0, *, out=None) -> Tensor + +Alias of :func:`torch.cat`. +""", +) + +add_docstr( + torch.concatenate, + r""" +concatenate(tensors, axis=0, out=None) -> Tensor + +Alias of :func:`torch.cat`. +""", +) + +add_docstr( + torch.ceil, + r""" +ceil(input, *, out=None) -> Tensor + +Returns a new tensor with the ceil of the elements of :attr:`input`, +the smallest integer greater than or equal to each element. + +For integer inputs, follows the array-api convention of returning a +copy of the input tensor. + +.. math:: + \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.6341, -1.4208, -1.0900, 0.5826]) + >>> torch.ceil(a) + tensor([-0., -1., -1., 1.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.real, + r""" +real(input) -> Tensor + +Returns a new tensor containing real values of the :attr:`self` tensor. +The returned tensor and :attr:`self` share the same underlying storage. + +Args: + {input} + +Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) + >>> x.real + tensor([ 0.3100, -0.5445, -1.6492, -0.0638]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.imag, + r""" +imag(input) -> Tensor + +Returns a new tensor containing imaginary values of the :attr:`self` tensor. +The returned tensor and :attr:`self` share the same underlying storage. + +.. warning:: + :func:`imag` is only supported for tensors with complex dtypes. + +Args: + {input} + +Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.3100+0.3553j), (-0.5445-0.7896j), (-1.6492-0.0633j), (-0.0638-0.8119j)]) + >>> x.imag + tensor([ 0.3553, -0.7896, -0.0633, -0.8119]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.view_as_real, + r""" +view_as_real(input) -> Tensor + +Returns a view of :attr:`input` as a real tensor. For an input complex tensor of +:attr:`size` :math:`m1, m2, \dots, mi`, this function returns a new +real tensor of size :math:`m1, m2, \dots, mi, 2`, where the last dimension of size 2 +represents the real and imaginary components of complex numbers. + +.. warning:: + :func:`view_as_real` is only supported for tensors with ``complex dtypes``. + +Args: + {input} + +Example:: + + >>> x=torch.randn(4, dtype=torch.cfloat) + >>> x + tensor([(0.4737-0.3839j), (-0.2098-0.6699j), (0.3470-0.9451j), (-0.5174-1.3136j)]) + >>> torch.view_as_real(x) + tensor([[ 0.4737, -0.3839], + [-0.2098, -0.6699], + [ 0.3470, -0.9451], + [-0.5174, -1.3136]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.view_as_complex, + r""" +view_as_complex(input) -> Tensor + +Returns a view of :attr:`input` as a complex tensor. 
For an input complex +tensor of :attr:`size` :math:`m1, m2, \dots, mi, 2`, this function returns a +new complex tensor of :attr:`size` :math:`m1, m2, \dots, mi` where the last +dimension of the input tensor is expected to represent the real and imaginary +components of complex numbers. + +.. warning:: + :func:`view_as_complex` is only supported for tensors with + :class:`torch.dtype` ``torch.float64`` and ``torch.float32``. The input is + expected to have the last dimension of :attr:`size` 2. In addition, the + tensor must have a `stride` of 1 for its last dimension. The strides of all + other dimensions must be even numbers. + +Args: + {input} + +Example:: + + >>> x=torch.randn(4, 2) + >>> x + tensor([[ 1.6116, -0.5772], + [-1.4606, -0.9120], + [ 0.0786, -1.7497], + [-0.6561, -1.6623]]) + >>> torch.view_as_complex(x) + tensor([(1.6116-0.5772j), (-1.4606-0.9120j), (0.0786-1.7497j), (-0.6561-1.6623j)]) +""".format( + **common_args + ), +) + +add_docstr( + torch.reciprocal, + r""" +reciprocal(input, *, out=None) -> Tensor + +Returns a new tensor with the reciprocal of the elements of :attr:`input` + +.. math:: + \text{out}_{i} = \frac{1}{\text{input}_{i}} + +.. note:: + Unlike NumPy's reciprocal, torch.reciprocal supports integral inputs. Integral + inputs to reciprocal are automatically :ref:`promoted ` to + the default scalar type. +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.4595, -2.1219, -1.4314, 0.7298]) + >>> torch.reciprocal(a) + tensor([-2.1763, -0.4713, -0.6986, 1.3702]) +""".format( + **common_args + ), +) + +add_docstr( + torch.cholesky, + r""" +cholesky(input, upper=False, *, out=None) -> Tensor + +Computes the Cholesky decomposition of a symmetric positive-definite +matrix :math:`A` or for batches of symmetric positive-definite matrices. + +If :attr:`upper` is ``True``, the returned matrix ``U`` is upper-triangular, and +the decomposition has the form: + +.. math:: + + A = U^TU + +If :attr:`upper` is ``False``, the returned matrix ``L`` is lower-triangular, and +the decomposition has the form: + +.. math:: + + A = LL^T + +If :attr:`upper` is ``True``, and :math:`A` is a batch of symmetric positive-definite +matrices, then the returned tensor will be composed of upper-triangular Cholesky factors +of each of the individual matrices. Similarly, when :attr:`upper` is ``False``, the returned +tensor will be composed of lower-triangular Cholesky factors of each of the individual +matrices. + +.. warning:: + + :func:`torch.cholesky` is deprecated in favor of :func:`torch.linalg.cholesky` + and will be removed in a future PyTorch release. + + ``L = torch.cholesky(A)`` should be replaced with + + .. code:: python + + L = torch.linalg.cholesky(A) + + ``U = torch.cholesky(A, upper=True)`` should be replaced with + + .. code:: python + + U = torch.linalg.cholesky(A).mH + + This transform will produce equivalent results for all valid (symmetric positive definite) inputs. + +Args: + input (Tensor): the input tensor :math:`A` of size :math:`(*, n, n)` where `*` is zero or more + batch dimensions consisting of symmetric positive-definite matrices. + upper (bool, optional): flag that indicates whether to return a + upper or lower triangular matrix. 
Default: ``False`` + +Keyword args: + out (Tensor, optional): the output matrix + +Example:: + + >>> a = torch.randn(3, 3) + >>> a = a @ a.mT + 1e-3 # make symmetric positive-definite + >>> l = torch.cholesky(a) + >>> a + tensor([[ 2.4112, -0.7486, 1.4551], + [-0.7486, 1.3544, 0.1294], + [ 1.4551, 0.1294, 1.6724]]) + >>> l + tensor([[ 1.5528, 0.0000, 0.0000], + [-0.4821, 1.0592, 0.0000], + [ 0.9371, 0.5487, 0.7023]]) + >>> l @ l.mT + tensor([[ 2.4112, -0.7486, 1.4551], + [-0.7486, 1.3544, 0.1294], + [ 1.4551, 0.1294, 1.6724]]) + >>> a = torch.randn(3, 2, 2) # Example for batched input + >>> a = a @ a.mT + 1e-03 # make symmetric positive-definite + >>> l = torch.cholesky(a) + >>> z = l @ l.mT + >>> torch.dist(z, a) + tensor(2.3842e-07) +""", +) + +add_docstr( + torch.cholesky_solve, + r""" +cholesky_solve(B, L, upper=False, *, out=None) -> Tensor + +Computes the solution of a system of linear equations with complex Hermitian +or real symmetric positive-definite lhs given its Cholesky decomposition. + +Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, +and :math:`L` its Cholesky decomposition such that: + +.. math:: + + A = LL^{\text{H}} + +where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, +and the transpose when :math:`L` is real-valued. + +Returns the solution :math:`X` of the following linear system: + +.. math:: + + AX = B + +Supports inputs of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :math:`A` or :math:`B` is a batch of matrices +then the output has the same batch dimensions. + +Args: + B (Tensor): right-hand side tensor of shape `(*, n, k)` + where :math:`*` is zero or more batch dimensions + L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of lower or upper triangular Cholesky decompositions of + symmetric or Hermitian positive-definite matrices. + upper (bool, optional): flag that indicates whether :math:`L` is lower triangular + or upper triangular. Default: ``False``. + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Example:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix + >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition + >>> B = torch.randn(3, 2) + >>> torch.cholesky_solve(B, L) + tensor([[ -8.1625, 19.6097], + [ -5.8398, 14.2387], + [ -4.3771, 10.4173]]) + >>> A.inverse() @ B + tensor([[ -8.1626, 19.6097], + [ -5.8398, 14.2387], + [ -4.3771, 10.4173]]) + + >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) + >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices + >>> L = torch.linalg.cholesky(A) + >>> B = torch.randn(2, 1, dtype=torch.complex64) + >>> X = torch.cholesky_solve(B, L) + >>> torch.dist(X, A.inverse() @ B) + tensor(1.6881e-5) +""", +) + +add_docstr( + torch.cholesky_inverse, + r""" +cholesky_inverse(L, upper=False, *, out=None) -> Tensor + +Computes the inverse of a complex Hermitian or real symmetric +positive-definite matrix given its Cholesky decomposition. + +Let :math:`A` be a complex Hermitian or real symmetric positive-definite matrix, +and :math:`L` its Cholesky decomposition such that: + +.. math:: + + A = LL^{\text{H}} + +where :math:`L^{\text{H}}` is the conjugate transpose when :math:`L` is complex, +and the transpose when :math:`L` is real-valued. + +Computes the inverse matrix :math:`A^{-1}`. 
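+
+In terms of the factor, this amounts to :math:`A^{-1} = (LL^{\text{H}})^{-1} = L^{-\text{H}}L^{-1}`,
+so once the Cholesky factor is available the inverse can be assembled from the
+triangular factor without factorizing :math:`A` again.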
+ +Supports input of float, double, cfloat and cdouble dtypes. +Also supports batches of matrices, and if :math:`A` is a batch of matrices +then the output has the same batch dimensions. + +Args: + L (Tensor): tensor of shape `(*, n, n)` where `*` is zero or more batch dimensions + consisting of lower or upper triangular Cholesky decompositions of + symmetric or Hermitian positive-definite matrices. + upper (bool, optional): flag that indicates whether :math:`L` is lower triangular + or upper triangular. Default: ``False`` + +Keyword args: + out (Tensor, optional): output tensor. Ignored if `None`. Default: `None`. + +Example:: + + >>> A = torch.randn(3, 3) + >>> A = A @ A.T + torch.eye(3) * 1e-3 # Creates a symmetric positive-definite matrix + >>> L = torch.linalg.cholesky(A) # Extract Cholesky decomposition + >>> torch.cholesky_inverse(L) + tensor([[ 1.9314, 1.2251, -0.0889], + [ 1.2251, 2.4439, 0.2122], + [-0.0889, 0.2122, 0.1412]]) + >>> A.inverse() + tensor([[ 1.9314, 1.2251, -0.0889], + [ 1.2251, 2.4439, 0.2122], + [-0.0889, 0.2122, 0.1412]]) + + >>> A = torch.randn(3, 2, 2, dtype=torch.complex64) + >>> A = A @ A.mH + torch.eye(2) * 1e-3 # Batch of Hermitian positive-definite matrices + >>> L = torch.linalg.cholesky(A) + >>> torch.dist(torch.inverse(A), torch.cholesky_inverse(L)) + tensor(5.6358e-7) +""", +) + +add_docstr( + torch.clone, + r""" +clone(input, *, memory_format=torch.preserve_format) -> Tensor + +Returns a copy of :attr:`input`. + +.. note:: + + This function is differentiable, so gradients will flow back from the + result of this operation to :attr:`input`. To create a tensor without an + autograd relationship to :attr:`input` see :meth:`~Tensor.detach`. + +Args: + {input} + +Keyword args: + {memory_format} +""".format( + **common_args + ), +) + +add_docstr( + torch.clamp, + r""" +clamp(input, min=None, max=None, *, out=None) -> Tensor + +Clamps all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]`. +Letting min_value and max_value be :attr:`min` and :attr:`max`, respectively, this returns: + +.. math:: + y_i = \min(\max(x_i, \text{min\_value}_i), \text{max\_value}_i) + +If :attr:`min` is ``None``, there is no lower bound. +Or, if :attr:`max` is ``None`` there is no upper bound. +""" + + r""" + +.. note:: + If :attr:`min` is greater than :attr:`max` :func:`torch.clamp(..., min, max) ` + sets all elements in :attr:`input` to the value of :attr:`max`. + +Args: + {input} + min (Number or Tensor, optional): lower-bound of the range to be clamped to + max (Number or Tensor, optional): upper-bound of the range to be clamped to + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.7120, 0.1734, -0.0478, -0.0922]) + >>> torch.clamp(a, min=-0.5, max=0.5) + tensor([-0.5000, 0.1734, -0.0478, -0.0922]) + + >>> min = torch.linspace(-1, 1, steps=4) + >>> torch.clamp(a, min=min) + tensor([-1.0000, 0.1734, 0.3333, 1.0000]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.clip, + r""" +clip(input, min=None, max=None, *, out=None) -> Tensor + +Alias for :func:`torch.clamp`. +""", +) + +add_docstr( + torch.column_stack, + r""" +column_stack(tensors, *, out=None) -> Tensor + +Creates a new tensor by horizontally stacking the tensors in :attr:`tensors`. + +Equivalent to ``torch.hstack(tensors)``, except each zero or one dimensional tensor ``t`` +in :attr:`tensors` is first reshaped into a ``(t.numel(), 1)`` column before being stacked horizontally. 
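+
+As a quick illustrative sketch of that equivalence (the tensors below are chosen
+only for exposition), reshaping 1-D inputs into columns first and calling
+:func:`torch.hstack` gives the same result:
+
+.. code:: python
+
+    import torch
+
+    a = torch.tensor([1, 2, 3])
+    b = torch.tensor([4, 5, 6])
+    # Both calls build the 3 x 2 tensor [[1, 4], [2, 5], [3, 6]].
+    assert torch.equal(
+        torch.column_stack((a, b)),
+        torch.hstack((a.reshape(-1, 1), b.reshape(-1, 1))),
+    )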
+ +Args: + tensors (sequence of Tensors): sequence of tensors to concatenate + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 2, 3]) + >>> b = torch.tensor([4, 5, 6]) + >>> torch.column_stack((a, b)) + tensor([[1, 4], + [2, 5], + [3, 6]]) + >>> a = torch.arange(5) + >>> b = torch.arange(10).reshape(5, 2) + >>> torch.column_stack((a, b, b)) + tensor([[0, 0, 1, 0, 1], + [1, 2, 3, 2, 3], + [2, 4, 5, 4, 5], + [3, 6, 7, 6, 7], + [4, 8, 9, 8, 9]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.complex, + r""" +complex(real, imag, *, out=None) -> Tensor + +Constructs a complex tensor with its real part equal to :attr:`real` and its +imaginary part equal to :attr:`imag`. + +Args: + real (Tensor): The real part of the complex tensor. Must be half, float or double. + imag (Tensor): The imaginary part of the complex tensor. Must be same dtype + as :attr:`real`. + +Keyword args: + out (Tensor): If the inputs are ``torch.float32``, must be + ``torch.complex64``. If the inputs are ``torch.float64``, must be + ``torch.complex128``. + +Example:: + + >>> real = torch.tensor([1, 2], dtype=torch.float32) + >>> imag = torch.tensor([3, 4], dtype=torch.float32) + >>> z = torch.complex(real, imag) + >>> z + tensor([(1.+3.j), (2.+4.j)]) + >>> z.dtype + torch.complex64 + +""", +) + +add_docstr( + torch.polar, + r""" +polar(abs, angle, *, out=None) -> Tensor + +Constructs a complex tensor whose elements are Cartesian coordinates +corresponding to the polar coordinates with absolute value :attr:`abs` and angle +:attr:`angle`. + +.. math:: + \text{out} = \text{abs} \cdot \cos(\text{angle}) + \text{abs} \cdot \sin(\text{angle}) \cdot j + +.. note:: + `torch.polar` is similar to + `std::polar `_ + and does not compute the polar decomposition + of a complex tensor like Python's `cmath.polar` and SciPy's `linalg.polar` do. + The behavior of this function is undefined if `abs` is negative or NaN, or if `angle` is + infinite. + +""" + + r""" +Args: + abs (Tensor): The absolute value the complex tensor. Must be float or double. + angle (Tensor): The angle of the complex tensor. Must be same dtype as + :attr:`abs`. + +Keyword args: + out (Tensor): If the inputs are ``torch.float32``, must be + ``torch.complex64``. If the inputs are ``torch.float64``, must be + ``torch.complex128``. + +Example:: + + >>> import numpy as np + >>> abs = torch.tensor([1, 2], dtype=torch.float64) + >>> angle = torch.tensor([np.pi / 2, 5 * np.pi / 4], dtype=torch.float64) + >>> z = torch.polar(abs, angle) + >>> z + tensor([(0.0000+1.0000j), (-1.4142-1.4142j)], dtype=torch.complex128) +""", +) + +add_docstr( + torch.conj_physical, + r""" +conj_physical(input, *, out=None) -> Tensor + +Computes the element-wise conjugate of the given :attr:`input` tensor. +If :attr:`input` has a non-complex dtype, this function just returns :attr:`input`. + +.. note:: + This performs the conjugate operation regardless of the fact conjugate bit is set or not. + +.. warning:: In the future, :func:`torch.conj_physical` may return a non-writeable view for an :attr:`input` of + non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` + when :attr:`input` is of non-complex dtype to be compatible with this change. + +.. 
math:: + \text{out}_{i} = conj(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.conj_physical(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j])) + tensor([-1 - 1j, -2 - 2j, 3 + 3j]) +""".format( + **common_args + ), +) + +add_docstr( + torch.conj, + r""" +conj(input) -> Tensor + +Returns a view of :attr:`input` with a flipped conjugate bit. If :attr:`input` has a non-complex dtype, +this function just returns :attr:`input`. + +.. note:: + :func:`torch.conj` performs a lazy conjugation, but the actual conjugated tensor can be materialized + at any time using :func:`torch.resolve_conj`. + +.. warning:: In the future, :func:`torch.conj` may return a non-writeable view for an :attr:`input` of + non-complex dtype. It's recommended that programs not modify the tensor returned by :func:`torch.conj_physical` + when :attr:`input` is of non-complex dtype to be compatible with this change. + +Args: + {input} + +Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> x.is_conj() + False + >>> y = torch.conj(x) + >>> y.is_conj() + True +""".format( + **common_args + ), +) + +add_docstr( + torch.resolve_conj, + r""" +resolve_conj(input) -> Tensor + +Returns a new tensor with materialized conjugation if :attr:`input`'s conjugate bit is set to `True`, +else returns :attr:`input`. The output tensor will always have its conjugate bit set to `False`. + +Args: + {input} + +Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> y = x.conj() + >>> y.is_conj() + True + >>> z = y.resolve_conj() + >>> z + tensor([-1 - 1j, -2 - 2j, 3 + 3j]) + >>> z.is_conj() + False +""".format( + **common_args + ), +) + +add_docstr( + torch.resolve_neg, + r""" +resolve_neg(input) -> Tensor + +Returns a new tensor with materialized negation if :attr:`input`'s negative bit is set to `True`, +else returns :attr:`input`. The output tensor will always have its negative bit set to `False`. + +Args: + {input} + +Example:: + + >>> x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) + >>> y = x.conj() + >>> z = y.imag + >>> z.is_neg() + True + >>> out = z.resolve_neg() + >>> out + tensor([-1., -2., 3.]) + >>> out.is_neg() + False +""".format( + **common_args + ), +) + +add_docstr( + torch.copysign, + r""" +copysign(input, other, *, out=None) -> Tensor + +Create a new floating-point tensor with the magnitude of :attr:`input` and the sign of :attr:`other`, elementwise. + +.. math:: + \text{out}_{i} = \begin{cases} + -|\text{input}_{i}| & \text{if } \text{other}_{i} \leq -0.0 \\ + |\text{input}_{i}| & \text{if } \text{other}_{i} \geq 0.0 \\ + \end{cases} +""" + + r""" + +Supports :ref:`broadcasting to a common shape `, +and integer and float inputs. + +Args: + input (Tensor): magnitudes. + other (Tensor or Number): contains value(s) whose signbit(s) are + applied to the magnitudes in :attr:`input`. 
+ +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.2557, -0.0026, -0.5387, 0.4740, -0.9244]) + >>> torch.copysign(a, 1) + tensor([1.2557, 0.0026, 0.5387, 0.4740, 0.9244]) + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.7079, 0.2778, -1.0249, 0.5719], + [-0.0059, -0.2600, -0.4475, -1.3948], + [ 0.3667, -0.9567, -2.5757, -0.1751], + [ 0.2046, -0.0742, 0.2998, -0.1054]]) + >>> b = torch.randn(4) + tensor([ 0.2373, 0.3120, 0.3190, -1.1128]) + >>> torch.copysign(a, b) + tensor([[ 0.7079, 0.2778, 1.0249, -0.5719], + [ 0.0059, 0.2600, 0.4475, -1.3948], + [ 0.3667, 0.9567, 2.5757, -0.1751], + [ 0.2046, 0.0742, 0.2998, -0.1054]]) + >>> a = torch.tensor([1.]) + >>> b = torch.tensor([-0.]) + >>> torch.copysign(a, b) + tensor([-1.]) + +.. note:: + copysign handles signed zeros. If the other argument has a negative zero (-0), + the corresponding output value will be negative. + +""".format( + **common_args + ), +) + +add_docstr( + torch.cos, + r""" +cos(input, *, out=None) -> Tensor + +Returns a new tensor with the cosine of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \cos(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 1.4309, 1.2706, -0.8562, 0.9796]) + >>> torch.cos(a) + tensor([ 0.1395, 0.2957, 0.6553, 0.5574]) +""".format( + **common_args + ), +) + +add_docstr( + torch.cosh, + r""" +cosh(input, *, out=None) -> Tensor + +Returns a new tensor with the hyperbolic cosine of the elements of +:attr:`input`. + +.. math:: + \text{out}_{i} = \cosh(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.1632, 1.1835, -0.6979, -0.7325]) + >>> torch.cosh(a) + tensor([ 1.0133, 1.7860, 1.2536, 1.2805]) + +.. note:: + When :attr:`input` is on the CPU, the implementation of torch.cosh may use + the Sleef library, which rounds very large results to infinity or negative + infinity. See `here `_ for details. +""".format( + **common_args + ), +) + +add_docstr( + torch.cross, + r""" +cross(input, other, dim=None, *, out=None) -> Tensor + + +Returns the cross product of vectors in dimension :attr:`dim` of :attr:`input` +and :attr:`other`. + +Supports input of float, double, cfloat and cdouble dtypes. Also supports batches +of vectors, for which it computes the product along the dimension :attr:`dim`. +In this case, the output has the same batch dimensions as the inputs. + +.. warning:: + If :attr:`dim` is not given, it defaults to the first dimension found + with the size 3. Note that this might be unexpected. + + This behavior is deprecated and will be changed to match that of :func:`torch.linalg.cross` + in a future release. + +.. seealso:: + :func:`torch.linalg.cross` which has dim=-1 as default. + + +Args: + {input} + other (Tensor): the second input tensor + dim (int, optional): the dimension to take the cross-product in. 
+ +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4, 3) + >>> a + tensor([[-0.3956, 1.1455, 1.6895], + [-0.5849, 1.3672, 0.3599], + [-1.1626, 0.7180, -0.0521], + [-0.1339, 0.9902, -2.0225]]) + >>> b = torch.randn(4, 3) + >>> b + tensor([[-0.0257, -1.4725, -1.2251], + [-1.1479, -0.7005, -1.9757], + [-1.3904, 0.3726, -1.1836], + [-0.9688, -0.7153, 0.2159]]) + >>> torch.cross(a, b, dim=1) + tensor([[ 1.0844, -0.5281, 0.6120], + [-2.4490, -1.5687, 1.9792], + [-0.8304, -1.3037, 0.5650], + [-1.2329, 1.9883, 1.0551]]) + >>> torch.cross(a, b) + tensor([[ 1.0844, -0.5281, 0.6120], + [-2.4490, -1.5687, 1.9792], + [-0.8304, -1.3037, 0.5650], + [-1.2329, 1.9883, 1.0551]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logcumsumexp, + r""" +logcumsumexp(input, dim, *, out=None) -> Tensor +Returns the logarithm of the cumulative summation of the exponentiation of +elements of :attr:`input` in the dimension :attr:`dim`. + +For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. math:: + \text{{logcumsumexp}}(x)_{{ij}} = \log \sum\limits_{{j=0}}^{{i}} \exp(x_{{ij}}) + +Args: + {input} + dim (int): the dimension to do the operation over + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(10) + >>> torch.logcumsumexp(a, dim=0) + tensor([-0.42296738, -0.04462666, 0.86278635, 0.94622083, 1.05277811, + 1.39202815, 1.83525007, 1.84492621, 2.06084887, 2.06844475])) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.cummax, + r""" +cummax(input, dim, *, out=None) -> (Tensor, LongTensor) +Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative maximum of +elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index +location of each maximum value found in the dimension :attr:`dim`. + +.. math:: + y_i = max(x_1, x_2, x_3, \dots, x_i) + +Args: + {input} + dim (int): the dimension to do the operation over + +Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + +Example:: + + >>> a = torch.randn(10) + >>> a + tensor([-0.3449, -1.5447, 0.0685, -1.5104, -1.1706, 0.2259, 1.4696, -1.3284, + 1.9946, -0.8209]) + >>> torch.cummax(a, dim=0) + torch.return_types.cummax( + values=tensor([-0.3449, -0.3449, 0.0685, 0.0685, 0.0685, 0.2259, 1.4696, 1.4696, + 1.9946, 1.9946]), + indices=tensor([0, 0, 2, 2, 2, 5, 6, 6, 8, 8])) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.cummin, + r""" +cummin(input, dim, *, out=None) -> (Tensor, LongTensor) +Returns a namedtuple ``(values, indices)`` where ``values`` is the cumulative minimum of +elements of :attr:`input` in the dimension :attr:`dim`. And ``indices`` is the index +location of each maximum value found in the dimension :attr:`dim`. + +.. 
math:: + y_i = min(x_1, x_2, x_3, \dots, x_i) + +Args: + {input} + dim (int): the dimension to do the operation over + +Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + +Example:: + + >>> a = torch.randn(10) + >>> a + tensor([-0.2284, -0.6628, 0.0975, 0.2680, -1.3298, -0.4220, -0.3885, 1.1762, + 0.9165, 1.6684]) + >>> torch.cummin(a, dim=0) + torch.return_types.cummin( + values=tensor([-0.2284, -0.6628, -0.6628, -0.6628, -1.3298, -1.3298, -1.3298, -1.3298, + -1.3298, -1.3298]), + indices=tensor([0, 1, 1, 1, 4, 4, 4, 4, 4, 4])) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.cumprod, + r""" +cumprod(input, dim, *, dtype=None, out=None) -> Tensor + +Returns the cumulative product of elements of :attr:`input` in the dimension +:attr:`dim`. + +For example, if :attr:`input` is a vector of size N, the result will also be +a vector of size N, with elements. + +.. math:: + y_i = x_1 \times x_2\times x_3\times \dots \times x_i + +Args: + {input} + dim (int): the dimension to do the operation over + +Keyword args: + {dtype} + {out} + +Example:: + + >>> a = torch.randn(10) + >>> a + tensor([ 0.6001, 0.2069, -0.1919, 0.9792, 0.6727, 1.0062, 0.4126, + -0.2129, -0.4206, 0.1968]) + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0158, -0.0065, + 0.0014, -0.0006, -0.0001]) + + >>> a[5] = 0.0 + >>> torch.cumprod(a, dim=0) + tensor([ 0.6001, 0.1241, -0.0238, -0.0233, -0.0157, -0.0000, -0.0000, + 0.0000, -0.0000, -0.0000]) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.cumsum, + r""" +cumsum(input, dim, *, dtype=None, out=None) -> Tensor + +Returns the cumulative sum of elements of :attr:`input` in the dimension +:attr:`dim`. + +For example, if :attr:`input` is a vector of size N, the result will also be +a vector of size N, with elements. + +.. math:: + y_i = x_1 + x_2 + x_3 + \dots + x_i + +Args: + {input} + dim (int): the dimension to do the operation over + +Keyword args: + {dtype} + {out} + +Example:: + + >>> a = torch.randint(1, 20, (10,)) + >>> a + tensor([13, 7, 3, 10, 13, 3, 15, 10, 9, 10]) + >>> torch.cumsum(a, dim=0) + tensor([13, 20, 23, 33, 46, 49, 64, 74, 83, 93]) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.count_nonzero, + r""" +count_nonzero(input, dim=None) -> Tensor + +Counts the number of non-zero values in the tensor :attr:`input` along the given :attr:`dim`. +If no dim is specified then all non-zeros in the tensor are counted. + +Args: + {input} + dim (int or tuple of ints, optional): Dim or tuple of dims along which to count non-zeros. + +Example:: + + >>> x = torch.zeros(3,3) + >>> x[torch.randn(3,3) > 0.5] = 1 + >>> x + tensor([[0., 1., 1.], + [0., 0., 0.], + [0., 0., 1.]]) + >>> torch.count_nonzero(x) + tensor(3) + >>> torch.count_nonzero(x, dim=0) + tensor([0, 1, 2]) +""".format( + **reduceops_common_args + ), +) + +add_docstr( + torch.dequantize, + r""" +dequantize(tensor) -> Tensor + +Returns an fp32 Tensor by dequantizing a quantized Tensor + +Args: + tensor (Tensor): A quantized Tensor + +.. 
function:: dequantize(tensors) -> sequence of Tensors + :noindex: + +Given a list of quantized Tensors, dequantize them and return a list of fp32 Tensors + +Args: + tensors (sequence of Tensors): A list of quantized Tensors +""", +) + +add_docstr( + torch.diag, + r""" +diag(input, diagonal=0, *, out=None) -> Tensor + +- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor + with the elements of :attr:`input` as the diagonal. +- If :attr:`input` is a matrix (2-D tensor), then returns a 1-D tensor with + the diagonal elements of :attr:`input`. + +The argument :attr:`diagonal` controls which diagonal to consider: + +- If :attr:`diagonal` = 0, it is the main diagonal. +- If :attr:`diagonal` > 0, it is above the main diagonal. +- If :attr:`diagonal` < 0, it is below the main diagonal. + +Args: + {input} + diagonal (int, optional): the diagonal to consider + +Keyword args: + {out} + +.. seealso:: + + :func:`torch.diagonal` always returns the diagonal of its input. + + :func:`torch.diagflat` always constructs a tensor with diagonal elements + specified by the input. + +Examples: + +Get the square matrix where the input vector is the diagonal:: + + >>> a = torch.randn(3) + >>> a + tensor([ 0.5950,-0.0872, 2.3298]) + >>> torch.diag(a) + tensor([[ 0.5950, 0.0000, 0.0000], + [ 0.0000,-0.0872, 0.0000], + [ 0.0000, 0.0000, 2.3298]]) + >>> torch.diag(a, 1) + tensor([[ 0.0000, 0.5950, 0.0000, 0.0000], + [ 0.0000, 0.0000,-0.0872, 0.0000], + [ 0.0000, 0.0000, 0.0000, 2.3298], + [ 0.0000, 0.0000, 0.0000, 0.0000]]) + +Get the k-th diagonal of a given matrix:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-0.4264, 0.0255,-0.1064], + [ 0.8795,-0.2429, 0.1374], + [ 0.1029,-0.6482,-1.6300]]) + >>> torch.diag(a, 0) + tensor([-0.4264,-0.2429,-1.6300]) + >>> torch.diag(a, 1) + tensor([ 0.0255, 0.1374]) +""".format( + **common_args + ), +) + +add_docstr( + torch.diag_embed, + r""" +diag_embed(input, offset=0, dim1=-2, dim2=-1) -> Tensor + +Creates a tensor whose diagonals of certain 2D planes (specified by +:attr:`dim1` and :attr:`dim2`) are filled by :attr:`input`. +To facilitate creating batched diagonal matrices, the 2D planes formed by +the last two dimensions of the returned tensor are chosen by default. + +The argument :attr:`offset` controls which diagonal to consider: + +- If :attr:`offset` = 0, it is the main diagonal. +- If :attr:`offset` > 0, it is above the main diagonal. +- If :attr:`offset` < 0, it is below the main diagonal. + +The size of the new matrix will be calculated to make the specified diagonal +of the size of the last input dimension. +Note that for :attr:`offset` other than :math:`0`, the order of :attr:`dim1` +and :attr:`dim2` matters. Exchanging them is equivalent to changing the +sign of :attr:`offset`. + +Applying :meth:`torch.diagonal` to the output of this function with +the same arguments yields a matrix identical to input. However, +:meth:`torch.diagonal` has different default dimensions, so those +need to be explicitly specified. + +Args: + {input} Must be at least 1-dimensional. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: -2. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: -1. 
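+
+As a quick illustrative check of the round trip described above (shapes chosen
+only for exposition), reading the same diagonals back with :func:`torch.diagonal`
+recovers the input:
+
+.. code:: python
+
+    import torch
+
+    x = torch.arange(6.).reshape(2, 3)                    # two diagonals of length 3
+    d = torch.diag_embed(x, offset=0, dim1=-2, dim2=-1)   # shape (2, 3, 3)
+    # torch.diagonal has different default dims, so they are passed explicitly.
+    assert torch.equal(torch.diagonal(d, offset=0, dim1=-2, dim2=-1), x)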
+ +Example:: + + >>> a = torch.randn(2, 3) + >>> torch.diag_embed(a) + tensor([[[ 1.5410, 0.0000, 0.0000], + [ 0.0000, -0.2934, 0.0000], + [ 0.0000, 0.0000, -2.1788]], + + [[ 0.5684, 0.0000, 0.0000], + [ 0.0000, -1.0845, 0.0000], + [ 0.0000, 0.0000, -1.3986]]]) + + >>> torch.diag_embed(a, offset=1, dim1=0, dim2=2) + tensor([[[ 0.0000, 1.5410, 0.0000, 0.0000], + [ 0.0000, 0.5684, 0.0000, 0.0000]], + + [[ 0.0000, 0.0000, -0.2934, 0.0000], + [ 0.0000, 0.0000, -1.0845, 0.0000]], + + [[ 0.0000, 0.0000, 0.0000, -2.1788], + [ 0.0000, 0.0000, 0.0000, -1.3986]], + + [[ 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.0000]]]) +""".format( + **common_args + ), +) + + +add_docstr( + torch.diagflat, + r""" +diagflat(input, offset=0) -> Tensor + +- If :attr:`input` is a vector (1-D tensor), then returns a 2-D square tensor + with the elements of :attr:`input` as the diagonal. +- If :attr:`input` is a tensor with more than one dimension, then returns a + 2-D tensor with diagonal elements equal to a flattened :attr:`input`. + +The argument :attr:`offset` controls which diagonal to consider: + +- If :attr:`offset` = 0, it is the main diagonal. +- If :attr:`offset` > 0, it is above the main diagonal. +- If :attr:`offset` < 0, it is below the main diagonal. + +Args: + {input} + offset (int, optional): the diagonal to consider. Default: 0 (main + diagonal). + +Examples:: + + >>> a = torch.randn(3) + >>> a + tensor([-0.2956, -0.9068, 0.1695]) + >>> torch.diagflat(a) + tensor([[-0.2956, 0.0000, 0.0000], + [ 0.0000, -0.9068, 0.0000], + [ 0.0000, 0.0000, 0.1695]]) + >>> torch.diagflat(a, 1) + tensor([[ 0.0000, -0.2956, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.9068, 0.0000], + [ 0.0000, 0.0000, 0.0000, 0.1695], + [ 0.0000, 0.0000, 0.0000, 0.0000]]) + + >>> a = torch.randn(2, 2) + >>> a + tensor([[ 0.2094, -0.3018], + [-0.1516, 1.9342]]) + >>> torch.diagflat(a) + tensor([[ 0.2094, 0.0000, 0.0000, 0.0000], + [ 0.0000, -0.3018, 0.0000, 0.0000], + [ 0.0000, 0.0000, -0.1516, 0.0000], + [ 0.0000, 0.0000, 0.0000, 1.9342]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.diagonal, + r""" +diagonal(input, offset=0, dim1=0, dim2=1) -> Tensor + +Returns a partial view of :attr:`input` with the its diagonal elements +with respect to :attr:`dim1` and :attr:`dim2` appended as a dimension +at the end of the shape. + +The argument :attr:`offset` controls which diagonal to consider: + +- If :attr:`offset` = 0, it is the main diagonal. +- If :attr:`offset` > 0, it is above the main diagonal. +- If :attr:`offset` < 0, it is below the main diagonal. + +Applying :meth:`torch.diag_embed` to the output of this function with +the same arguments yields a diagonal matrix with the diagonal entries +of the input. However, :meth:`torch.diag_embed` has different default +dimensions, so those need to be explicitly specified. + +Args: + {input} Must be at least 2-dimensional. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + +.. note:: To take a batch diagonal, pass in dim1=-2, dim2=-1. 
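+
+A minimal sketch of that batch pattern (the shape is illustrative): for a batch of
+square matrices, the per-matrix diagonals come back stacked along the batch
+dimension, with the diagonal itself appended as the last dimension.
+
+.. code:: python
+
+    import torch
+
+    x = torch.zeros(4, 3, 3)
+    # One length-3 diagonal per batch element -> shape (4, 3).
+    assert torch.diagonal(x, offset=0, dim1=-2, dim2=-1).shape == (4, 3)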
+ +Examples:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0854, 1.1431, -0.1752], + [ 0.8536, -0.0905, 0.0360], + [ 0.6927, -0.3735, -0.4945]]) + + + >>> torch.diagonal(a, 0) + tensor([-1.0854, -0.0905, -0.4945]) + + + >>> torch.diagonal(a, 1) + tensor([ 1.1431, 0.0360]) + + + >>> x = torch.randn(2, 5, 4, 2) + >>> torch.diagonal(x, offset=-1, dim1=1, dim2=2) + tensor([[[-1.2631, 0.3755, -1.5977, -1.8172], + [-1.1065, 1.0401, -0.2235, -0.7938]], + + [[-1.7325, -0.3081, 0.6166, 0.2335], + [ 1.0500, 0.7336, -0.3836, -1.1015]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.diagonal_scatter, + r""" +diagonal_scatter(input, src, offset=0, dim1=0, dim2=1) -> Tensor + +Embeds the values of the :attr:`src` tensor into :attr:`input` along +the diagonal elements of :attr:`input`, with respect to :attr:`dim1` +and :attr:`dim2`. + +This function returns a tensor with fresh storage; it does not +return a view. + +The argument :attr:`offset` controls which diagonal to consider: + +- If :attr:`offset` = 0, it is the main diagonal. +- If :attr:`offset` > 0, it is above the main diagonal. +- If :attr:`offset` < 0, it is below the main diagonal. + +Args: + {input} Must be at least 2-dimensional. + src (Tensor): the tensor to embed into :attr:`input`. + offset (int, optional): which diagonal to consider. Default: 0 + (main diagonal). + dim1 (int, optional): first dimension with respect to which to + take diagonal. Default: 0. + dim2 (int, optional): second dimension with respect to which to + take diagonal. Default: 1. + +.. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + ``torch.diagonal(input, offset, dim1, dim2)`` + +Examples:: + + >>> a = torch.zeros(3, 3) + >>> a + tensor([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + + >>> torch.diagonal_scatter(a, torch.ones(3), 0) + tensor([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + + >>> torch.diagonal_scatter(a, torch.ones(2), 1) + tensor([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.as_strided_scatter, + r""" +as_strided_scatter(input, src, size, stride, storage_offset=None) -> Tensor + +Embeds the values of the :attr:`src` tensor into :attr:`input` along +the elements corresponding to the result of calling +input.as_strided(size, stride, storage_offset). + +This function returns a tensor with fresh storage; it does not +return a view. + +Args: + {input} + size (tuple or ints): the shape of the output tensor + stride (tuple or ints): the stride of the output tensor + storage_offset (int, optional): the offset in the underlying storage of the output tensor + +.. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + `torch.as_strided(input, size, stride, storage_offset)` + +Example:: + + >>> a = torch.arange(4).reshape(2, 2) + 1 + >>> a + tensor([[1, 2], + [3, 4]]) + >>> b = torch.zeros(3, 3) + >>> b + tensor([[0., 0., 0.], + [0., 0., 0.], + [0., 0., 0.]]) + >>> torch.as_strided_scatter(b, a, (2, 2), (1, 2)) + tensor([[1., 3., 2.], + [4., 0., 0.], + [0., 0., 0.]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.diff, + r""" +diff(input, n=1, dim=-1, prepend=None, append=None) -> Tensor + +Computes the n-th forward difference along the given dimension. + +The first-order differences are given by `out[i] = input[i + 1] - input[i]`. 
Higher-order +differences are calculated by using :func:`torch.diff` recursively. + +Args: + input (Tensor): the tensor to compute the differences on + n (int, optional): the number of times to recursively compute the difference + dim (int, optional): the dimension to compute the difference along. + Default is the last dimension. + prepend, append (Tensor, optional): values to prepend or append to + :attr:`input` along :attr:`dim` before computing the difference. + Their dimensions must be equivalent to that of input, and their shapes + must match input's shape except on :attr:`dim`. + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([1, 3, 2]) + >>> torch.diff(a) + tensor([ 2, -1]) + >>> b = torch.tensor([4, 5]) + >>> torch.diff(a, append=b) + tensor([ 2, -1, 2, 1]) + >>> c = torch.tensor([[1, 2, 3], [3, 4, 5]]) + >>> torch.diff(c, dim=0) + tensor([[2, 2, 2]]) + >>> torch.diff(c, dim=1) + tensor([[1, 1], + [1, 1]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.digamma, + r""" +digamma(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.digamma`. +""", +) + +add_docstr( + torch.dist, + r""" +dist(input, other, p=2) -> Tensor + +Returns the p-norm of (:attr:`input` - :attr:`other`) + +The shapes of :attr:`input` and :attr:`other` must be +:ref:`broadcastable `. + +Args: + {input} + other (Tensor): the Right-hand-side input tensor + p (float, optional): the norm to be computed + +Example:: + + >>> x = torch.randn(4) + >>> x + tensor([-1.5393, -0.8675, 0.5916, 1.6321]) + >>> y = torch.randn(4) + >>> y + tensor([ 0.0967, -1.0511, 0.6295, 0.8360]) + >>> torch.dist(x, y, 3.5) + tensor(1.6727) + >>> torch.dist(x, y, 3) + tensor(1.6973) + >>> torch.dist(x, y, 0) + tensor(4.) + >>> torch.dist(x, y, 1) + tensor(2.6537) +""".format( + **common_args + ), +) + +add_docstr( + torch.div, + r""" +div(input, other, *, rounding_mode=None, out=None) -> Tensor + +Divides each element of the input ``input`` by the corresponding element of +:attr:`other`. + +.. math:: + \text{{out}}_i = \frac{{\text{{input}}_i}}{{\text{{other}}_i}} + +.. note:: + By default, this performs a "true" division like Python 3. + See the :attr:`rounding_mode` argument for floor division. + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer, float, and complex inputs. +Always promotes integer types to the default scalar type. + +Args: + input (Tensor): the dividend + other (Tensor or Number): the divisor + +Keyword args: + rounding_mode (str, optional): Type of rounding applied to the result: + + * None - default behavior. Performs no rounding and, if both :attr:`input` and + :attr:`other` are integer types, promotes the inputs to the default scalar type. + Equivalent to true division in Python (the ``/`` operator) and NumPy's ``np.true_divide``. + * ``"trunc"`` - rounds the results of the division towards zero. + Equivalent to C-style integer division. + * ``"floor"`` - rounds the results of the division down. + Equivalent to floor division in Python (the ``//`` operator) and NumPy's ``np.floor_divide``. + + {out} + +Examples:: + + >>> x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637]) + >>> torch.div(x, 0.5) + tensor([ 0.7620, 2.5548, -0.5944, -0.7438, 0.9274]) + + >>> a = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917], + ... [ 0.1815, -1.0111, 0.9805, -1.5923], + ... [ 0.1062, 1.4581, 0.7759, -1.2344], + ... 
[-0.1830, -0.0313, 1.1908, -1.4757]]) + >>> b = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308]) + >>> torch.div(a, b) + tensor([[-0.4620, -6.6051, 0.5676, 1.2639], + [ 0.2260, -3.4509, -1.2086, 6.8990], + [ 0.1322, 4.9764, -0.9564, 5.3484], + [-0.2278, -0.1068, -1.4678, 6.3938]]) + + >>> torch.div(a, b, rounding_mode='trunc') + tensor([[-0., -6., 0., 1.], + [ 0., -3., -1., 6.], + [ 0., 4., -0., 5.], + [-0., -0., -1., 6.]]) + + >>> torch.div(a, b, rounding_mode='floor') + tensor([[-1., -7., 0., 1.], + [ 0., -4., -2., 6.], + [ 0., 4., -1., 5.], + [-1., -1., -2., 6.]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.divide, + r""" +divide(input, other, *, rounding_mode=None, out=None) -> Tensor + +Alias for :func:`torch.div`. +""", +) + +add_docstr( + torch.dot, + r""" +dot(input, other, *, out=None) -> Tensor + +Computes the dot product of two 1D tensors. + +.. note:: + + Unlike NumPy's dot, torch.dot intentionally only supports computing the dot product + of two 1D tensors with the same number of elements. + +Args: + input (Tensor): first tensor in the dot product, must be 1D. + other (Tensor): second tensor in the dot product, must be 1D. + +Keyword args: + {out} + +Example:: + + >>> torch.dot(torch.tensor([2, 3]), torch.tensor([2, 1])) + tensor(7) +""".format( + **common_args + ), +) + +add_docstr( + torch.vdot, + r""" +vdot(input, other, *, out=None) -> Tensor + +Computes the dot product of two 1D vectors along a dimension. + +In symbols, this function computes + +.. math:: + + \sum_{i=1}^n \overline{x_i}y_i. + +where :math:`\overline{x_i}` denotes the conjugate for complex +vectors, and it is the identity for real vectors. + +.. note:: + + Unlike NumPy's vdot, torch.vdot intentionally only supports computing the dot product + of two 1D tensors with the same number of elements. + +.. seealso:: + + :func:`torch.linalg.vecdot` computes the dot product of two batches of vectors along a dimension. + +Args: + input (Tensor): first tensor in the dot product, must be 1D. Its conjugate is used if it's complex. + other (Tensor): second tensor in the dot product, must be 1D. + +Keyword args: +""" + + rf""" +.. note:: {common_args["out"]} +""" + + r""" + +Example:: + + >>> torch.vdot(torch.tensor([2, 3]), torch.tensor([2, 1])) + tensor(7) + >>> a = torch.tensor((1 +2j, 3 - 1j)) + >>> b = torch.tensor((2 +1j, 4 - 0j)) + >>> torch.vdot(a, b) + tensor([16.+1.j]) + >>> torch.vdot(b, a) + tensor([16.-1.j]) +""", +) + +add_docstr( + torch.eq, + r""" +eq(input, other, *, out=None) -> Tensor + +Computes element-wise equality + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is equal to :attr:`other` and False elsewhere + +Example:: + + >>> torch.eq(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[ True, False], + [False, True]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.equal, + r""" +equal(input, other) -> bool + +``True`` if two tensors have the same size and elements, ``False`` otherwise. + +Example:: + + >>> torch.equal(torch.tensor([1, 2]), torch.tensor([1, 2])) + True +""", +) + +add_docstr( + torch.erf, + r""" +erf(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.erf`. 
+""", +) + +add_docstr( + torch.erfc, + r""" +erfc(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.erfc`. +""", +) + +add_docstr( + torch.erfinv, + r""" +erfinv(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.erfinv`. +""", +) + +add_docstr( + torch.exp, + r""" +exp(input, *, out=None) -> Tensor + +Returns a new tensor with the exponential of the elements +of the input tensor :attr:`input`. + +.. math:: + y_{i} = e^{x_{i}} +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.exp(torch.tensor([0, math.log(2.)])) + tensor([ 1., 2.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.exp2, + r""" +exp2(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.exp2`. +""", +) + +add_docstr( + torch.expm1, + r""" +expm1(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.expm1`. +""", +) + +add_docstr( + torch.eye, + r""" +eye(n, m=None, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. + +Args: + n (int): the number of rows + m (int, optional): the number of columns with default being :attr:`n` + +Keyword arguments: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Returns: + Tensor: A 2-D tensor with ones on the diagonal and zeros elsewhere + +Example:: + + >>> torch.eye(3) + tensor([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.floor, + r""" +floor(input, *, out=None) -> Tensor + +Returns a new tensor with the floor of the elements of :attr:`input`, +the largest integer less than or equal to each element. + +For integer inputs, follows the array-api convention of returning a +copy of the input tensor. + +.. math:: + \text{out}_{i} = \left\lfloor \text{input}_{i} \right\rfloor +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.8166, 1.5308, -0.2530, -0.2091]) + >>> torch.floor(a) + tensor([-1., 1., -1., -1.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.floor_divide, + r""" +floor_divide(input, other, *, out=None) -> Tensor + +.. note:: + + Before PyTorch 1.13 :func:`torch.floor_divide` incorrectly performed + truncation division. To restore the previous behavior use + :func:`torch.div` with ``rounding_mode='trunc'``. + +Computes :attr:`input` divided by :attr:`other`, elementwise, and floors +the result. + +.. math:: + \text{{out}}_i = \text{floor} \left( \frac{{\text{{input}}_i}}{{\text{{other}}_i}} \right) + +""" + + r""" + +Supports broadcasting to a common shape, type promotion, and integer and float inputs. + +Args: + input (Tensor or Number): the dividend + other (Tensor or Number): the divisor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([4.0, 3.0]) + >>> b = torch.tensor([2.0, 2.0]) + >>> torch.floor_divide(a, b) + tensor([2.0, 1.0]) + >>> torch.floor_divide(a, 1.4) + tensor([2.0, 2.0]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fmod, + r""" +fmod(input, other, *, out=None) -> Tensor + +Applies C++'s `std::fmod `_ entrywise. +The result has the same sign as the dividend :attr:`input` and its absolute value +is less than that of :attr:`other`. + +This function may be defined in terms of :func:`torch.div` as + +.. 
code:: python + + torch.fmod(a, b) == a - a.div(b, rounding_mode="trunc") * b + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer and float inputs. + +.. note:: + + When the divisor is zero, returns ``NaN`` for floating point dtypes + on both CPU and GPU; raises ``RuntimeError`` for integer division by + zero on CPU; Integer division by zero on GPU may return any value. + +.. note:: + + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + +.. seealso:: + + :func:`torch.remainder` which implements Python's modulus operator. + This one is defined using division rounding down the result. + +Args: + input (Tensor): the dividend + other (Tensor or Scalar): the divisor + +Keyword args: + {out} + +Example:: + + >>> torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([-1., -0., -1., 1., 0., 1.]) + >>> torch.fmod(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.frac, + r""" +frac(input, *, out=None) -> Tensor + +Computes the fractional portion of each element in :attr:`input`. + +.. math:: + \text{out}_{i} = \text{input}_{i} - \left\lfloor |\text{input}_{i}| \right\rfloor * \operatorname{sgn}(\text{input}_{i}) + +Example:: + + >>> torch.frac(torch.tensor([1, 2.5, -3.2])) + tensor([ 0.0000, 0.5000, -0.2000]) +""", +) + +add_docstr( + torch.frexp, + r""" +frexp(input, *, out=None) -> (Tensor mantissa, Tensor exponent) + +Decomposes :attr:`input` into mantissa and exponent tensors +such that :math:`\text{input} = \text{mantissa} \times 2^{\text{exponent}}`. + +The range of mantissa is the open interval (-1, 1). + +Supports float inputs. + +Args: + input (Tensor): the input tensor + + +Keyword args: + out (tuple, optional): the output tensors + +Example:: + + >>> x = torch.arange(9.) + >>> mantissa, exponent = torch.frexp(x) + >>> mantissa + tensor([0.0000, 0.5000, 0.5000, 0.7500, 0.5000, 0.6250, 0.7500, 0.8750, 0.5000]) + >>> exponent + tensor([0, 1, 2, 2, 3, 3, 3, 3, 4], dtype=torch.int32) + >>> torch.ldexp(mantissa, exponent) + tensor([0., 1., 2., 3., 4., 5., 6., 7., 8.]) +""", +) + +add_docstr( + torch.from_numpy, + r""" +from_numpy(ndarray) -> Tensor + +Creates a :class:`Tensor` from a :class:`numpy.ndarray`. + +The returned tensor and :attr:`ndarray` share the same memory. Modifications to +the tensor will be reflected in the :attr:`ndarray` and vice versa. The returned +tensor is not resizable. + +It currently accepts :attr:`ndarray` with dtypes of ``numpy.float64``, +``numpy.float32``, ``numpy.float16``, ``numpy.complex64``, ``numpy.complex128``, +``numpy.int64``, ``numpy.int32``, ``numpy.int16``, ``numpy.int8``, ``numpy.uint8``, +and ``bool``. + +.. warning:: + Writing to a tensor created from a read-only NumPy array is not supported and will result in undefined behavior. + +Example:: + + >>> a = numpy.array([1, 2, 3]) + >>> t = torch.from_numpy(a) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) +""", +) + +add_docstr( + torch.frombuffer, + r""" +frombuffer(buffer, *, dtype, count=-1, offset=0, requires_grad=False) -> Tensor + +Creates a 1-dimensional :class:`Tensor` from an object that implements +the Python buffer protocol. + +Skips the first :attr:`offset` bytes in the buffer, and interprets the rest of +the raw bytes as a 1-dimensional tensor of type :attr:`dtype` with :attr:`count` +elements. 
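+
+A small sketch of the byte-level accounting (this assumes the common case where
+``array.array('i')`` stores 4-byte integers matching ``torch.int32``; the values
+are purely illustrative):
+
+.. code:: python
+
+    import array
+    import torch
+
+    buf = array.array('i', [10, 20, 30, 40])   # 4 elements, 16 bytes in total
+    # Skip the first element (4 bytes) and read everything that remains.
+    t = torch.frombuffer(buf, dtype=torch.int32, offset=4)
+    # t is tensor([20, 30, 40], dtype=torch.int32) and shares memory with buf.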
+ +Note that either of the following must be true: + +1. :attr:`count` is a positive non-zero number, and the total number of bytes +in the buffer is more than :attr:`offset` plus :attr:`count` times the size +(in bytes) of :attr:`dtype`. + +2. :attr:`count` is negative, and the length (number of bytes) of the buffer +subtracted by the :attr:`offset` is a multiple of the size (in bytes) of +:attr:`dtype`. + +The returned tensor and buffer share the same memory. Modifications to +the tensor will be reflected in the buffer and vice versa. The returned +tensor is not resizable. + +.. note:: + This function increments the reference count for the object that + owns the shared memory. Therefore, such memory will not be deallocated + before the returned tensor goes out of scope. + +.. warning:: + This function's behavior is undefined when passed an object implementing + the buffer protocol whose data is not on the CPU. Doing so is likely to + cause a segmentation fault. + +.. warning:: + This function does not try to infer the :attr:`dtype` (hence, it is not + optional). Passing a different :attr:`dtype` than its source may result + in unexpected behavior. + +Args: + buffer (object): a Python object that exposes the buffer interface. + +Keyword args: + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + count (int, optional): the number of desired elements to be read. + If negative, all the elements (until the end of the buffer) will be + read. Default: -1. + offset (int, optional): the number of bytes to skip at the start of + the buffer. Default: 0. + {requires_grad} + +Example:: + + >>> import array + >>> a = array.array('i', [1, 2, 3]) + >>> t = torch.frombuffer(a, dtype=torch.int32) + >>> t + tensor([ 1, 2, 3]) + >>> t[0] = -1 + >>> a + array([-1, 2, 3]) + + >>> # Interprets the signed char bytes as 32-bit integers. + >>> # Each 4 signed char elements will be interpreted as + >>> # 1 signed 32-bit integer. + >>> import array + >>> a = array.array('b', [-1, 0, 0, 0]) + >>> torch.frombuffer(a, dtype=torch.int32) + tensor([255], dtype=torch.int32) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.from_file, + r""" +from_file(filename, shared=None, size=0, *, dtype=None, layout=None, device=None, pin_memory=False) + +Creates a CPU tensor with a storage backed by a memory-mapped file. + +If ``shared`` is True, then memory is shared between processes. All changes are written to the file. +If ``shared`` is False, then changes to the tensor do not affect the file. + +``size`` is the number of elements in the Tensor. If ``shared`` is ``False``, then the file must contain +at least ``size * sizeof(dtype)`` bytes. If ``shared`` is ``True`` the file will be created if needed. + +.. note:: + Only CPU tensors can be mapped to files. + +.. note:: + For now, tensors with storages backed by a memory-mapped file cannot be created in pinned memory. 
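+
+As a concrete check of the size rule above: mapping ``size=10`` elements of
+``torch.float64`` with ``shared=False`` requires the file to hold at least
+``10 * 8 = 80`` bytes.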
+ + +Args: + filename (str): file name to map + shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the + underlying `mmap(2) call `_) + size (int): number of elements in the tensor + +Keyword args: + {dtype} + {layout} + {device} + {pin_memory} + +Example:: + >>> t = torch.randn(2, 5, dtype=torch.float64) + >>> t.numpy().tofile('storage.pt') + >>> t_mapped = torch.from_file('storage.pt', shared=False, size=10, dtype=torch.float64) + """.format( + **factory_common_args + ), +) + +add_docstr( + torch.flatten, + r""" +flatten(input, start_dim=0, end_dim=-1) -> Tensor + +Flattens :attr:`input` by reshaping it into a one-dimensional tensor. If :attr:`start_dim` or :attr:`end_dim` +are passed, only dimensions starting with :attr:`start_dim` and ending with :attr:`end_dim` are flattened. +The order of elements in :attr:`input` is unchanged. + +Unlike NumPy's flatten, which always copies input's data, this function may return the original object, a view, +or copy. If no dimensions are flattened, then the original object :attr:`input` is returned. Otherwise, if input can +be viewed as the flattened shape, then that view is returned. Finally, only if the input cannot be viewed as the +flattened shape is input's data copied. See :meth:`torch.Tensor.view` for details on when a view will be returned. + +.. note:: + Flattening a zero-dimensional tensor will return a one-dimensional view. + +Args: + {input} + start_dim (int): the first dim to flatten + end_dim (int): the last dim to flatten + +Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.flatten(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) + >>> torch.flatten(t, start_dim=1) + tensor([[1, 2, 3, 4], + [5, 6, 7, 8]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.unflatten, + r""" +unflatten(input, dim, sizes) -> Tensor + +Expands a dimension of the input tensor over multiple dimensions. + +.. seealso:: + + :func:`torch.flatten` the inverse of this function. It coalesces several dimensions into one. + +Args: + {input} + dim (int): Dimension to be unflattened, specified as an index into + ``input.shape``. + sizes (Tuple[int]): New shape of the unflattened dimension. + One of its elements can be `-1` in which case the corresponding output + dimension is inferred. Otherwise, the product of ``sizes`` *must* + equal ``input.shape[dim]``. + +Returns: + A View of input with the specified dimension unflattened. + +Examples:: + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (2, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(3, 4, 1), 1, (-1, 2)).shape + torch.Size([3, 2, 2, 1]) + >>> torch.unflatten(torch.randn(5, 12, 3), -2, (2, 2, 3, 1, 1)).shape + torch.Size([5, 2, 2, 3, 1, 1, 3]) +""".format( + **common_args + ), +) + +add_docstr( + torch.gather, + r""" +gather(input, dim, index, *, sparse_grad=False, out=None) -> Tensor + +Gathers values along an axis specified by `dim`. + +For a 3-D tensor the output is specified by:: + + out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0 + out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1 + out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2 + +:attr:`input` and :attr:`index` must have the same number of dimensions. +It is also required that ``index.size(d) <= input.size(d)`` for all +dimensions ``d != dim``. :attr:`out` will have the same shape as :attr:`index`. +Note that ``input`` and ``index`` do not broadcast against each other. 
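+
+To make the indexing rule concrete, here is a small sketch for the 2-D case with
+``dim=0`` (the numbers are illustrative): each output element takes its row from
+``index`` while keeping its own column.
+
+.. code:: python
+
+    import torch
+
+    t = torch.tensor([[1, 2], [3, 4]])
+    idx = torch.tensor([[0, 1], [1, 0]])
+    # out[i][j] = t[idx[i][j]][j] when dim == 0
+    out = torch.gather(t, 0, idx)
+    # out is tensor([[1, 4], [3, 2]])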
+ +Args: + input (Tensor): the source tensor + dim (int): the axis along which to index + index (LongTensor): the indices of elements to gather + +Keyword arguments: + sparse_grad (bool, optional): If ``True``, gradient w.r.t. :attr:`input` will be a sparse tensor. + out (Tensor, optional): the destination tensor + +Example:: + + >>> t = torch.tensor([[1, 2], [3, 4]]) + >>> torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]])) + tensor([[ 1, 1], + [ 4, 3]]) +""", +) + + +add_docstr( + torch.gcd, + r""" +gcd(input, other, *, out=None) -> Tensor + +Computes the element-wise greatest common divisor (GCD) of :attr:`input` and :attr:`other`. + +Both :attr:`input` and :attr:`other` must have integer types. + +.. note:: + This defines :math:`gcd(0, 0) = 0`. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.tensor([5, 10, 15]) + >>> b = torch.tensor([3, 4, 5]) + >>> torch.gcd(a, b) + tensor([1, 2, 5]) + >>> c = torch.tensor([3]) + >>> torch.gcd(a, c) + tensor([1, 1, 3]) +""".format( + **common_args + ), +) + +add_docstr( + torch.ge, + r""" +ge(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} \geq \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is greater than or equal to :attr:`other` and False elsewhere + +Example:: + + >>> torch.ge(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, True], [False, True]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.greater_equal, + r""" +greater_equal(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.ge`. +""", +) + +add_docstr( + torch.gradient, + r""" +gradient(input, *, spacing=1, dim=None, edge_order=1) -> List of Tensors + +Estimates the gradient of a function :math:`g : \mathbb{R}^n \rightarrow \mathbb{R}` in +one or more dimensions using the `second-order accurate central differences method +`_ and +either first or second order estimates at the boundaries. + +The gradient of :math:`g` is estimated using samples. By default, when :attr:`spacing` is not +specified, the samples are entirely described by :attr:`input`, and the mapping of input coordinates +to an output is the same as the tensor's mapping of indices to values. For example, for a three-dimensional +:attr:`input` the function described is :math:`g : \mathbb{R}^3 \rightarrow \mathbb{R}`, and +:math:`g(1, 2, 3)\ == input[1, 2, 3]`. + +When :attr:`spacing` is specified, it modifies the relationship between :attr:`input` and input coordinates. +This is detailed in the "Keyword Arguments" section below. + +The gradient is estimated by estimating each partial derivative of :math:`g` independently. This estimation is +accurate if :math:`g` is in :math:`C^3` (it has at least 3 continuous derivatives), and the estimation can be +improved by providing closer samples. Mathematically, the value at each interior point of a partial derivative +is estimated using `Taylor’s theorem with remainder `_. +Letting :math:`x` be an interior point with :math:`x-h_l` and :math:`x+h_r` be points neighboring +it to the left and right respectively, :math:`f(x+h_r)` and :math:`f(x-h_l)` can be estimated using: + +.. 
math:: + \begin{aligned} + f(x+h_r) = f(x) + h_r f'(x) + {h_r}^2 \frac{f''(x)}{2} + {h_r}^3 \frac{f'''(\xi_1)}{6}, \xi_1 \in (x, x+h_r) \\ + f(x-h_l) = f(x) - h_l f'(x) + {h_l}^2 \frac{f''(x)}{2} - {h_l}^3 \frac{f'''(\xi_2)}{6}, \xi_2 \in (x, x-h_l) \\ + \end{aligned} + +Using the fact that :math:`f \in C^3` and solving the linear system, we derive: + +.. math:: + f'(x) \approx \frac{ {h_l}^2 f(x+h_r) - {h_r}^2 f(x-h_l) + + ({h_r}^2-{h_l}^2 ) f(x) }{ {h_r} {h_l}^2 + {h_r}^2 {h_l} } + +.. note:: + We estimate the gradient of functions in complex domain + :math:`g : \mathbb{C}^n \rightarrow \mathbb{C}` in the same way. + +The value of each partial derivative at the boundary points is computed differently. See edge_order below. + +Args: + input (``Tensor``): the tensor that represents the values of the function + +Keyword args: + spacing (``scalar``, ``list of scalar``, ``list of Tensor``, optional): :attr:`spacing` can be used to modify + how the :attr:`input` tensor's indices relate to sample coordinates. If :attr:`spacing` is a scalar then + the indices are multiplied by the scalar to produce the coordinates. For example, if :attr:`spacing=2` the + indices (1, 2, 3) become coordinates (2, 4, 6). If :attr:`spacing` is a list of scalars then the corresponding + indices are multiplied. For example, if :attr:`spacing=(2, -1, 3)` the indices (1, 2, 3) become coordinates (2, -2, 9). + Finally, if :attr:`spacing` is a list of one-dimensional tensors then each tensor specifies the coordinates for + the corresponding dimension. For example, if the indices are (1, 2, 3) and the tensors are (t0, t1, t2), then + the coordinates are (t0[1], t1[2], t2[3]) + + dim (``int``, ``list of int``, optional): the dimension or dimensions to approximate the gradient over. By default + the partial gradient in every dimension is computed. Note that when :attr:`dim` is specified the elements of + the :attr:`spacing` argument must correspond with the specified dims." + + edge_order (``int``, optional): 1 or 2, for `first-order + `_ or + `second-order `_ + estimation of the boundary ("edge") values, respectively. + +Examples:: + + >>> # Estimates the gradient of f(x)=x^2 at points [-2, -1, 2, 4] + >>> coordinates = (torch.tensor([-2., -1., 1., 4.]),) + >>> values = torch.tensor([4., 1., 1., 16.], ) + >>> torch.gradient(values, spacing = coordinates) + (tensor([-3., -2., 2., 5.]),) + + >>> # Estimates the gradient of the R^2 -> R function whose samples are + >>> # described by the tensor t. Implicit coordinates are [0, 1] for the outermost + >>> # dimension and [0, 1, 2, 3] for the innermost dimension, and function estimates + >>> # partial derivative for both dimensions. + >>> t = torch.tensor([[1, 2, 4, 8], [10, 20, 40, 80]]) + >>> torch.gradient(t) + (tensor([[ 9., 18., 36., 72.], + [ 9., 18., 36., 72.]]), + tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]])) + + >>> # A scalar value for spacing modifies the relationship between tensor indices + >>> # and input coordinates by multiplying the indices to find the + >>> # coordinates. For example, below the indices of the innermost + >>> # 0, 1, 2, 3 translate to coordinates of [0, 2, 4, 6], and the indices of + >>> # the outermost dimension 0, 1 translate to coordinates of [0, 2]. 
+ >>> torch.gradient(t, spacing = 2.0) # dim = None (implicitly [0, 1]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.5000, 0.7500, 1.5000, 2.0000], + [ 5.0000, 7.5000, 15.0000, 20.0000]])) + >>> # doubling the spacing between samples halves the estimated partial gradients. + + >>> + >>> # Estimates only the partial derivative for dimension 1 + >>> torch.gradient(t, dim = 1) # spacing = None (implicitly 1.) + (tensor([[ 1.0000, 1.5000, 3.0000, 4.0000], + [10.0000, 15.0000, 30.0000, 40.0000]]),) + + >>> # When spacing is a list of scalars, the relationship between the tensor + >>> # indices and input coordinates changes based on dimension. + >>> # For example, below, the indices of the innermost dimension 0, 1, 2, 3 translate + >>> # to coordinates of [0, 3, 6, 9], and the indices of the outermost dimension + >>> # 0, 1 translate to coordinates of [0, 2]. + >>> torch.gradient(t, spacing = [3., 2.]) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + + >>> # The following example is a replication of the previous one with explicit + >>> # coordinates. + >>> coords = (torch.tensor([0, 2]), torch.tensor([0, 3, 6, 9])) + >>> torch.gradient(t, spacing = coords) + (tensor([[ 4.5000, 9.0000, 18.0000, 36.0000], + [ 4.5000, 9.0000, 18.0000, 36.0000]]), + tensor([[ 0.3333, 0.5000, 1.0000, 1.3333], + [ 3.3333, 5.0000, 10.0000, 13.3333]])) + +""", +) + +add_docstr( + torch.geqrf, + r""" +geqrf(input, *, out=None) -> (Tensor, Tensor) + +This is a low-level function for calling LAPACK's geqrf directly. This function +returns a namedtuple (a, tau) as defined in `LAPACK documentation for geqrf`_ . + +Computes a QR decomposition of :attr:`input`. +Both `Q` and `R` matrices are stored in the same output tensor `a`. +The elements of `R` are stored on and above the diagonal. +Elementary reflectors (or Householder vectors) implicitly defining matrix `Q` +are stored below the diagonal. +The results of this function can be used together with :func:`torch.linalg.householder_product` +to obtain the `Q` matrix or +with :func:`torch.ormqr`, which uses an implicit representation of the `Q` matrix, +for an efficient matrix-matrix multiplication. + +See `LAPACK documentation for geqrf`_ for further details. + +.. note:: + See also :func:`torch.linalg.qr`, which computes Q and R matrices, and :func:`torch.linalg.lstsq` + with the ``driver="gels"`` option for a function that can solve matrix equations using a QR decomposition. + +Args: + input (Tensor): the input matrix + +Keyword args: + out (tuple, optional): the output tuple of (Tensor, Tensor). Ignored if `None`. Default: `None`. + +.. _LAPACK documentation for geqrf: + http://www.netlib.org/lapack/explore-html/df/dc5/group__variants_g_ecomputational_ga3766ea903391b5cf9008132f7440ec7b.html + +""", +) + +add_docstr( + torch.inner, + r""" +inner(input, other, *, out=None) -> Tensor + +Computes the dot product for 1D tensors. For higher dimensions, sums the product +of elements from :attr:`input` and :attr:`other` along their last dimension. + +.. note:: + + If either :attr:`input` or :attr:`other` is a scalar, the result is equivalent + to `torch.mul(input, other)`. 
+ + If both :attr:`input` and :attr:`other` are non-scalars, the size of their last + dimension must match and the result is equivalent to `torch.tensordot(input, + other, dims=([-1], [-1]))` + +Args: + input (Tensor): First input tensor + other (Tensor): Second input tensor + +Keyword args: + out (Tensor, optional): Optional output tensor to write result into. The output + shape is `input.shape[:-1] + other.shape[:-1]`. + +Example:: + + # Dot product + >>> torch.inner(torch.tensor([1, 2, 3]), torch.tensor([0, 2, 1])) + tensor(7) + + # Multidimensional input tensors + >>> a = torch.randn(2, 3) + >>> a + tensor([[0.8173, 1.0874, 1.1784], + [0.3279, 0.1234, 2.7894]]) + >>> b = torch.randn(2, 4, 3) + >>> b + tensor([[[-0.4682, -0.7159, 0.1506], + [ 0.4034, -0.3657, 1.0387], + [ 0.9892, -0.6684, 0.1774], + [ 0.9482, 1.3261, 0.3917]], + + [[ 0.4537, 0.7493, 1.1724], + [ 0.2291, 0.5749, -0.2267], + [-0.7920, 0.3607, -0.3701], + [ 1.3666, -0.5850, -1.7242]]]) + >>> torch.inner(a, b) + tensor([[[-0.9837, 1.1560, 0.2907, 2.6785], + [ 2.5671, 0.5452, -0.6912, -1.5509]], + + [[ 0.1782, 2.9843, 0.7366, 1.5672], + [ 3.5115, -0.4864, -1.2476, -4.4337]]]) + + # Scalar input + >>> torch.inner(a, torch.tensor(2)) + tensor([[1.6347, 2.1748, 2.3567], + [0.6558, 0.2469, 5.5787]]) +""", +) + +add_docstr( + torch.outer, + r""" +outer(input, vec2, *, out=None) -> Tensor + +Outer product of :attr:`input` and :attr:`vec2`. +If :attr:`input` is a vector of size :math:`n` and :attr:`vec2` is a vector of +size :math:`m`, then :attr:`out` must be a matrix of size :math:`(n \times m)`. + +.. note:: This function does not :ref:`broadcast `. + +Args: + input (Tensor): 1-D input vector + vec2 (Tensor): 1-D input vector + +Keyword args: + out (Tensor, optional): optional output matrix + +Example:: + + >>> v1 = torch.arange(1., 5.) + >>> v2 = torch.arange(1., 4.) + >>> torch.outer(v1, v2) + tensor([[ 1., 2., 3.], + [ 2., 4., 6.], + [ 3., 6., 9.], + [ 4., 8., 12.]]) +""", +) + +add_docstr( + torch.ger, + r""" +ger(input, vec2, *, out=None) -> Tensor + +Alias of :func:`torch.outer`. + +.. warning:: + This function is deprecated and will be removed in a future PyTorch release. + Use :func:`torch.outer` instead. +""", +) + +add_docstr( + torch.get_default_dtype, + r""" +get_default_dtype() -> torch.dtype + +Get the current default floating point :class:`torch.dtype`. + +Example:: + + >>> torch.get_default_dtype() # initial default for floating point is torch.float32 + torch.float32 + >>> torch.set_default_dtype(torch.float64) + >>> torch.get_default_dtype() # default is now changed to torch.float64 + torch.float64 + +""", +) + +add_docstr( + torch.get_num_threads, + r""" +get_num_threads() -> int + +Returns the number of threads used for parallelizing CPU operations +""", +) + +add_docstr( + torch.get_num_interop_threads, + r""" +get_num_interop_threads() -> int + +Returns the number of threads used for inter-op parallelism on CPU +(e.g. in JIT interpreter) +""", +) + +add_docstr( + torch.gt, + r""" +gt(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} > \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. 
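+
+In particular, a plain Python number is compared against every element of the tensor
+(a small illustrative sketch)::
+
+    >>> torch.gt(torch.tensor([1, 2, 3]), 2)
+    tensor([False, False,  True])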
+ +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is greater than :attr:`other` and False elsewhere + +Example:: + + >>> torch.gt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [False, False]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.greater, + r""" +greater(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.gt`. +""", +) + +add_docstr( + torch.histc, + r""" +histc(input, bins=100, min=0, max=0, *, out=None) -> Tensor + +Computes the histogram of a tensor. + +The elements are sorted into equal width bins between :attr:`min` and +:attr:`max`. If :attr:`min` and :attr:`max` are both zero, the minimum and +maximum values of the data are used. + +Elements lower than min and higher than max and ``NaN`` elements are ignored. + +Args: + {input} + bins (int): number of histogram bins + min (Scalar): lower end of the range (inclusive) + max (Scalar): upper end of the range (inclusive) + +Keyword args: + {out} + +Returns: + Tensor: Histogram represented as a tensor + +Example:: + + >>> torch.histc(torch.tensor([1., 2, 1]), bins=4, min=0, max=3) + tensor([ 0., 2., 1., 0.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.histogram, + r""" +histogram(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor) + +Computes a histogram of the values in a tensor. + +:attr:`bins` can be an integer or a 1D tensor. + +If :attr:`bins` is an int, it specifies the number of equal-width bins. +By default, the lower and upper range of the bins is determined by the +minimum and maximum elements of the input tensor. The :attr:`range` +argument can be provided to specify a range for the bins. + +If :attr:`bins` is a 1D tensor, it specifies the sequence of bin edges +including the rightmost edge. It should contain at least 2 elements +and its elements should be increasing. + +Args: + {input} + bins: int or 1D Tensor. If int, defines the number of equal-width bins. If tensor, + defines the sequence of bin edges including the rightmost edge. + +Keyword args: + range (tuple of float): Defines the range of the bins. + weight (Tensor): If provided, weight should have the same shape as input. Each value in + input contributes its associated weight towards its bin's result. + density (bool): If False, the result will contain the count (or total weight) in each bin. + If True, the result is the value of the probability density function over the bins, + normalized such that the integral over the range of the bins is 1. + {out} (tuple, optional): The result tuple of two output tensors (hist, bin_edges). + +Returns: + hist (Tensor): 1D Tensor containing the values of the histogram. + bin_edges(Tensor): 1D Tensor containing the edges of the histogram bins. + +Example:: + + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.])) + (tensor([ 0., 5., 2., 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) + >>> torch.histogram(torch.tensor([1., 2, 1]), bins=4, range=(0., 3.), weight=torch.tensor([1., 2., 4.]), density=True) + (tensor([ 0., 0.9524, 0.3810, 0.]), tensor([0., 0.75, 1.5, 2.25, 3.])) +""".format( + **common_args + ), +) + +add_docstr( + torch.histogramdd, + r""" +histogramdd(input, bins, *, range=None, weight=None, density=False, out=None) -> (Tensor, Tensor[]) + +Computes a multi-dimensional histogram of the values in a tensor. 
+ +Interprets the elements of an input tensor whose innermost dimension has size N +as a collection of N-dimensional points. Maps each of the points into a set of +N-dimensional bins and returns the number of points (or total weight) in each bin. + +:attr:`input` must be a tensor with at least 2 dimensions. +If input has shape (M, N), each of its M rows defines a point in N-dimensional space. +If input has three or more dimensions, all but the last dimension are flattened. + +Each dimension is independently associated with its own strictly increasing sequence +of bin edges. Bin edges may be specified explicitly by passing a sequence of 1D +tensors. Alternatively, bin edges may be constructed automatically by passing a +sequence of integers specifying the number of equal-width bins in each dimension. + +For each N-dimensional point in input: + - Each of its coordinates is binned independently among the bin edges + corresponding to its dimension + - Binning results are combined to identify the N-dimensional bin (if any) + into which the point falls + - If the point falls into a bin, the bin's count (or total weight) is incremented + - Points which do not fall into any bin do not contribute to the output + +:attr:`bins` can be a sequence of N 1D tensors, a sequence of N ints, or a single int. + +If :attr:`bins` is a sequence of N 1D tensors, it explicitly specifies the N sequences +of bin edges. Each 1D tensor should contain a strictly increasing sequence with at +least one element. A sequence of K bin edges defines K-1 bins, explicitly specifying +the left and right edges of all bins. Every bin is exclusive of its left edge. Only +the rightmost bin is inclusive of its right edge. + +If :attr:`bins` is a sequence of N ints, it specifies the number of equal-width bins +in each dimension. By default, the leftmost and rightmost bin edges in each dimension +are determined by the minimum and maximum elements of the input tensor in the +corresponding dimension. The :attr:`range` argument can be provided to manually +specify the leftmost and rightmost bin edges in each dimension. + +If :attr:`bins` is an int, it specifies the number of equal-width bins for all dimensions. + +.. note:: + See also :func:`torch.histogram`, which specifically computes 1D histograms. + While :func:`torch.histogramdd` infers the dimensionality of its bins and + binned values from the shape of :attr:`input`, :func:`torch.histogram` + accepts and flattens :attr:`input` of any shape. + +Args: + {input} + bins: Tensor[], int[], or int. + If Tensor[], defines the sequences of bin edges. + If int[], defines the number of equal-width bins in each dimension. + If int, defines the number of equal-width bins for all dimensions. +Keyword args: + range (sequence of float): Defines the leftmost and rightmost bin edges + in each dimension. + weight (Tensor): By default, each value in the input has weight 1. If a weight + tensor is passed, each N-dimensional coordinate in input + contributes its associated weight towards its bin's result. + The weight tensor should have the same shape as the :attr:`input` + tensor excluding its innermost dimension N. + density (bool): If False (default), the result will contain the count (or total weight) + in each bin. If True, each count (weight) is divided by the total count + (total weight), then divided by the volume of its associated bin. +Returns: + hist (Tensor): N-dimensional Tensor containing the values of the histogram. + bin_edges(Tensor[]): sequence of N 1D Tensors containing the bin edges. 
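+
+As a minimal 1-D sketch of the edge rules described above, a sample lying exactly on
+the rightmost edge is still counted in the last bin::
+
+    >>> hist, edges = torch.histogramdd(torch.tensor([[0.], [1.], [2.]]), bins=[2], range=[0., 2.])
+    >>> hist  # bins are [0, 1) and [1, 2]; the sample 2. falls in the last bin
+    tensor([1., 2.])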
+ +Example:: + >>> torch.histogramdd(torch.tensor([[0., 1.], [1., 0.], [2., 0.], [2., 2.]]), bins=[3, 3], + ... weight=torch.tensor([1., 2., 4., 8.])) + torch.return_types.histogramdd( + hist=tensor([[0., 1., 0.], + [2., 0., 0.], + [4., 0., 8.]]), + bin_edges=(tensor([0.0000, 0.6667, 1.3333, 2.0000]), + tensor([0.0000, 0.6667, 1.3333, 2.0000]))) + + >>> torch.histogramdd(torch.tensor([[0., 0.], [1., 1.], [2., 2.]]), bins=[2, 2], + ... range=[0., 1., 0., 1.], density=True) + torch.return_types.histogramdd( + hist=tensor([[2., 0.], + [0., 2.]]), + bin_edges=(tensor([0.0000, 0.5000, 1.0000]), + tensor([0.0000, 0.5000, 1.0000]))) + +""".format( + **common_args + ), +) +# TODO: Fix via https://github.com/pytorch/pytorch/issues/75798 +torch.histogramdd.__module__ = "torch" + +add_docstr( + torch.hypot, + r""" +hypot(input, other, *, out=None) -> Tensor + +Given the legs of a right triangle, return its hypotenuse. + +.. math:: + \text{out}_{i} = \sqrt{\text{input}_{i}^{2} + \text{other}_{i}^{2}} + +The shapes of ``input`` and ``other`` must be +:ref:`broadcastable `. +""" + + r""" +Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])) + tensor([5.0000, 5.6569, 6.4031]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.i0, + r""" +i0(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.i0`. +""", +) + +add_docstr( + torch.igamma, + r""" +igamma(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.special.gammainc`. +""", +) + +add_docstr( + torch.igammac, + r""" +igammac(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.special.gammaincc`. +""", +) + +add_docstr( + torch.index_select, + r""" +index_select(input, dim, index, *, out=None) -> Tensor + +Returns a new tensor which indexes the :attr:`input` tensor along dimension +:attr:`dim` using the entries in :attr:`index` which is a `LongTensor`. + +The returned tensor has the same number of dimensions as the original tensor +(:attr:`input`). The :attr:`dim`\ th dimension has the same size as the length +of :attr:`index`; other dimensions have the same size as in the original tensor. + +.. note:: The returned tensor does **not** use the same storage as the original + tensor. If :attr:`out` has a different shape than expected, we + silently change it to the correct shape, reallocating the underlying + storage if necessary. + +Args: + {input} + dim (int): the dimension in which we index + index (IntTensor or LongTensor): the 1-D tensor containing the indices to index + +Keyword args: + {out} + +Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-0.4664, 0.2647, -0.1228, -1.1068], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> indices = torch.tensor([0, 2]) + >>> torch.index_select(x, 0, indices) + tensor([[ 0.1427, 0.0231, -0.5414, -1.0009], + [-1.1734, -0.6571, 0.7230, -0.6004]]) + >>> torch.index_select(x, 1, indices) + tensor([[ 0.1427, -0.5414], + [-0.4664, -0.1228], + [-1.1734, 0.7230]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.inverse, + r""" +inverse(input, *, out=None) -> Tensor + +Alias for :func:`torch.linalg.inv` +""", +) + +add_docstr( + torch.isin, + r""" +isin(elements, test_elements, *, assume_unique=False, invert=False) -> Tensor + +Tests if each element of :attr:`elements` is in :attr:`test_elements`. 
Returns +a boolean tensor of the same shape as :attr:`elements` that is True for elements +in :attr:`test_elements` and False otherwise. + +.. note:: + One of :attr:`elements` or :attr:`test_elements` can be a scalar, but not both. + +Args: + elements (Tensor or Scalar): Input elements + test_elements (Tensor or Scalar): Values against which to test for each input element + assume_unique (bool, optional): If True, assumes both :attr:`elements` and + :attr:`test_elements` contain unique elements, which can speed up the + calculation. Default: False + invert (bool, optional): If True, inverts the boolean return tensor, resulting in True + values for elements *not* in :attr:`test_elements`. Default: False + +Returns: + A boolean tensor of the same shape as :attr:`elements` that is True for elements in + :attr:`test_elements` and False otherwise + +Example: + >>> torch.isin(torch.tensor([[1, 2], [3, 4]]), torch.tensor([2, 3])) + tensor([[False, True], + [ True, False]]) +""", +) + +add_docstr( + torch.isinf, + r""" +isinf(input) -> Tensor + +Tests if each element of :attr:`input` is infinite +(positive or negative infinity) or not. + +.. note:: + Complex values are infinite when their real or imaginary part is + infinite. + +Args: + {input} + +Returns: + A boolean tensor that is True where :attr:`input` is infinite and False elsewhere + +Example:: + + >>> torch.isinf(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([False, True, False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isposinf, + r""" +isposinf(input, *, out=None) -> Tensor +Tests if each element of :attr:`input` is positive infinity or not. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) + >>> torch.isposinf(a) + tensor([False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isneginf, + r""" +isneginf(input, *, out=None) -> Tensor +Tests if each element of :attr:`input` is negative infinity or not. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([-float('inf'), float('inf'), 1.2]) + >>> torch.isneginf(a) + tensor([ True, False, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isclose, + r""" +isclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False) -> Tensor + +Returns a new tensor with boolean elements representing if each element of +:attr:`input` is "close" to the corresponding element of :attr:`other`. +Closeness is defined as: + +.. math:: + \lvert \text{input} - \text{other} \rvert \leq \texttt{atol} + \texttt{rtol} \times \lvert \text{other} \rvert +""" + + r""" + +where :attr:`input` and :attr:`other` are finite. Where :attr:`input` +and/or :attr:`other` are nonfinite they are close if and only if +they are equal, with NaNs being considered equal to each other when +:attr:`equal_nan` is True. + +Args: + input (Tensor): first tensor to compare + other (Tensor): second tensor to compare + atol (float, optional): absolute tolerance. Default: 1e-08 + rtol (float, optional): relative tolerance. Default: 1e-05 + equal_nan (bool, optional): if ``True``, then two ``NaN`` s will be considered equal. 
Default: ``False`` + +Examples:: + + >>> torch.isclose(torch.tensor((1., 2, 3)), torch.tensor((1 + 1e-10, 3, 4))) + tensor([ True, False, False]) + >>> torch.isclose(torch.tensor((float('inf'), 4)), torch.tensor((float('inf'), 6)), rtol=.5) + tensor([True, True]) +""", +) + +add_docstr( + torch.isfinite, + r""" +isfinite(input) -> Tensor + +Returns a new tensor with boolean elements representing if each element is `finite` or not. + +Real values are finite when they are not NaN, negative infinity, or infinity. +Complex values are finite when both their real and imaginary parts are finite. + +Args: + {input} + +Returns: + A boolean tensor that is True where :attr:`input` is finite and False elsewhere + +Example:: + + >>> torch.isfinite(torch.tensor([1, float('inf'), 2, float('-inf'), float('nan')])) + tensor([True, False, True, False, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isnan, + r""" +isnan(input) -> Tensor + +Returns a new tensor with boolean elements representing if each element of :attr:`input` +is NaN or not. Complex values are considered NaN when either their real +and/or imaginary part is NaN. + +Arguments: + {input} + +Returns: + A boolean tensor that is True where :attr:`input` is NaN and False elsewhere + +Example:: + + >>> torch.isnan(torch.tensor([1, float('nan'), 2])) + tensor([False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.isreal, + r""" +isreal(input) -> Tensor + +Returns a new tensor with boolean elements representing if each element of :attr:`input` is real-valued or not. +All real-valued types are considered real. Complex values are considered real when their imaginary part is 0. + +Arguments: + {input} + +Returns: + A boolean tensor that is True where :attr:`input` is real and False elsewhere + +Example:: + + >>> torch.isreal(torch.tensor([1, 1+1j, 2+0j])) + tensor([True, False, True]) +""".format( + **common_args + ), +) + +add_docstr( + torch.is_floating_point, + r""" +is_floating_point(input) -> (bool) + +Returns True if the data type of :attr:`input` is a floating point data type i.e., +one of ``torch.float64``, ``torch.float32``, ``torch.float16``, and ``torch.bfloat16``. + +Args: + {input} +""".format( + **common_args + ), +) + +add_docstr( + torch.is_complex, + r""" +is_complex(input) -> (bool) + +Returns True if the data type of :attr:`input` is a complex data type i.e., +one of ``torch.complex64``, and ``torch.complex128``. + +Args: + {input} +""".format( + **common_args + ), +) + +add_docstr( + torch.is_grad_enabled, + r""" +is_grad_enabled() -> (bool) + +Returns True if grad mode is currently enabled. +""".format( + **common_args + ), +) + +add_docstr( + torch.is_inference_mode_enabled, + r""" +is_inference_mode_enabled() -> (bool) + +Returns True if inference mode is currently enabled. +""".format( + **common_args + ), +) + +add_docstr( + torch.is_inference, + r""" +is_inference(input) -> (bool) + +Returns True if :attr:`input` is an inference tensor. + +A non-view tensor is an inference tensor if and only if it was +allocated during inference mode. A view tensor is an inference +tensor if and only if the tensor it is a view of is an inference tensor. + +For details on inference mode please see +`Inference Mode `_. + +Args: + {input} +""".format( + **common_args + ), +) + +add_docstr( + torch.is_conj, + r""" +is_conj(input) -> (bool) + +Returns True if the :attr:`input` is a conjugated tensor, i.e. its conjugate bit is set to `True`. 
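+
+A minimal sketch, assuming a complex tensor ``x``: :func:`torch.conj` returns a view
+with the conjugate bit set, which this function detects::
+
+    >>> x = torch.tensor([1 + 1j])
+    >>> torch.is_conj(x)
+    False
+    >>> torch.is_conj(x.conj())
+    True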
+ +Args: + {input} +""".format( + **common_args + ), +) + +add_docstr( + torch.is_nonzero, + r""" +is_nonzero(input) -> (bool) + +Returns True if the :attr:`input` is a single element tensor which is not equal to zero +after type conversions. +i.e. not equal to ``torch.tensor([0.])`` or ``torch.tensor([0])`` or +``torch.tensor([False])``. +Throws a ``RuntimeError`` if ``torch.numel() != 1`` (even in case +of sparse tensors). + +Args: + {input} + +Examples:: + + >>> torch.is_nonzero(torch.tensor([0.])) + False + >>> torch.is_nonzero(torch.tensor([1.5])) + True + >>> torch.is_nonzero(torch.tensor([False])) + False + >>> torch.is_nonzero(torch.tensor([3])) + True + >>> torch.is_nonzero(torch.tensor([1, 3, 5])) + Traceback (most recent call last): + ... + RuntimeError: bool value of Tensor with more than one value is ambiguous + >>> torch.is_nonzero(torch.tensor([])) + Traceback (most recent call last): + ... + RuntimeError: bool value of Tensor with no values is ambiguous +""".format( + **common_args + ), +) + +add_docstr( + torch.kron, + r""" +kron(input, other, *, out=None) -> Tensor + +Computes the Kronecker product, denoted by :math:`\otimes`, of :attr:`input` and :attr:`other`. + +If :attr:`input` is a :math:`(a_0 \times a_1 \times \dots \times a_n)` tensor and :attr:`other` is a +:math:`(b_0 \times b_1 \times \dots \times b_n)` tensor, the result will be a +:math:`(a_0*b_0 \times a_1*b_1 \times \dots \times a_n*b_n)` tensor with the following entries: + +.. math:: + (\text{input} \otimes \text{other})_{k_0, k_1, \dots, k_n} = + \text{input}_{i_0, i_1, \dots, i_n} * \text{other}_{j_0, j_1, \dots, j_n}, + +where :math:`k_t = i_t * b_t + j_t` for :math:`0 \leq t \leq n`. +If one tensor has fewer dimensions than the other it is unsqueezed until it has the same number of dimensions. + +Supports real-valued and complex-valued inputs. + +.. note:: + This function generalizes the typical definition of the Kronecker product for two matrices to two tensors, + as described above. When :attr:`input` is a :math:`(m \times n)` matrix and :attr:`other` is a + :math:`(p \times q)` matrix, the result will be a :math:`(p*m \times q*n)` block matrix: + + .. math:: + \mathbf{A} \otimes \mathbf{B}=\begin{bmatrix} + a_{11} \mathbf{B} & \cdots & a_{1 n} \mathbf{B} \\ + \vdots & \ddots & \vdots \\ + a_{m 1} \mathbf{B} & \cdots & a_{m n} \mathbf{B} \end{bmatrix} + + where :attr:`input` is :math:`\mathbf{A}` and :attr:`other` is :math:`\mathbf{B}`. + +Arguments: + input (Tensor) + other (Tensor) + +Keyword args: + out (Tensor, optional): The output tensor. Ignored if ``None``. Default: ``None`` + +Examples:: + + >>> mat1 = torch.eye(2) + >>> mat2 = torch.ones(2, 2) + >>> torch.kron(mat1, mat2) + tensor([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) + + >>> mat1 = torch.eye(2) + >>> mat2 = torch.arange(1, 5).reshape(2, 2) + >>> torch.kron(mat1, mat2) + tensor([[1., 2., 0., 0.], + [3., 4., 0., 0.], + [0., 0., 1., 2.], + [0., 0., 3., 4.]]) +""", +) + +add_docstr( + torch.kthvalue, + r""" +kthvalue(input, k, dim=None, keepdim=False, *, out=None) -> (Tensor, LongTensor) + +Returns a namedtuple ``(values, indices)`` where ``values`` is the :attr:`k` th +smallest element of each row of the :attr:`input` tensor in the given dimension +:attr:`dim`. And ``indices`` is the index location of each element found. + +If :attr:`dim` is not given, the last dimension of the `input` is chosen. 
+ +If :attr:`keepdim` is ``True``, both the :attr:`values` and :attr:`indices` tensors +are the same size as :attr:`input`, except in the dimension :attr:`dim` where +they are of size 1. Otherwise, :attr:`dim` is squeezed +(see :func:`torch.squeeze`), resulting in both the :attr:`values` and +:attr:`indices` tensors having 1 fewer dimension than the :attr:`input` tensor. + +.. note:: + When :attr:`input` is a CUDA tensor and there are multiple valid + :attr:`k` th values, this function may nondeterministically return + :attr:`indices` for any of them. + +Args: + {input} + k (int): k for the k-th smallest element + dim (int, optional): the dimension to find the kth value along + {keepdim} + +Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) + can be optionally given to be used as output buffers + +Example:: + + >>> x = torch.arange(1., 6.) + >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.kthvalue(x, 4) + torch.return_types.kthvalue(values=tensor(4.), indices=tensor(3)) + + >>> x=torch.arange(1.,7.).resize_(2,3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.]]) + >>> torch.kthvalue(x, 2, 0, True) + torch.return_types.kthvalue(values=tensor([[4., 5., 6.]]), indices=tensor([[1, 1, 1]])) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.lcm, + r""" +lcm(input, other, *, out=None) -> Tensor + +Computes the element-wise least common multiple (LCM) of :attr:`input` and :attr:`other`. + +Both :attr:`input` and :attr:`other` must have integer types. + +.. note:: + This defines :math:`lcm(0, 0) = 0` and :math:`lcm(0, a) = 0`. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.tensor([5, 10, 15]) + >>> b = torch.tensor([3, 4, 5]) + >>> torch.lcm(a, b) + tensor([15, 20, 15]) + >>> c = torch.tensor([3]) + >>> torch.lcm(a, c) + tensor([15, 30, 15]) +""".format( + **common_args + ), +) + +add_docstr( + torch.ldexp, + r""" +ldexp(input, other, *, out=None) -> Tensor + +Multiplies :attr:`input` by 2 ** :attr:`other`. + +.. math:: + \text{{out}}_i = \text{{input}}_i * 2^\text{{other}}_i +""" + + r""" + +Typically this function is used to construct floating point numbers by multiplying +mantissas in :attr:`input` with integral powers of two created from the exponents +in :attr:`other`. + +Args: + {input} + other (Tensor): a tensor of exponents, typically integers. + +Keyword args: + {out} + +Example:: + + >>> torch.ldexp(torch.tensor([1.]), torch.tensor([1])) + tensor([2.]) + >>> torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])) + tensor([ 2., 4., 8., 16.]) + + +""".format( + **common_args + ), +) + +add_docstr( + torch.le, + r""" +le(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} \leq \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or Scalar): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is less than or equal to + :attr:`other` and False elsewhere + +Example:: + + >>> torch.le(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[True, False], [True, True]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.less_equal, + r""" +less_equal(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.le`. 
+""", +) + +add_docstr( + torch.lerp, + r""" +lerp(input, end, weight, *, out=None) + +Does a linear interpolation of two tensors :attr:`start` (given by :attr:`input`) and :attr:`end` based +on a scalar or tensor :attr:`weight` and returns the resulting :attr:`out` tensor. + +.. math:: + \text{out}_i = \text{start}_i + \text{weight}_i \times (\text{end}_i - \text{start}_i) +""" + + r""" +The shapes of :attr:`start` and :attr:`end` must be +:ref:`broadcastable `. If :attr:`weight` is a tensor, then +the shapes of :attr:`weight`, :attr:`start`, and :attr:`end` must be :ref:`broadcastable `. + +Args: + input (Tensor): the tensor with the starting points + end (Tensor): the tensor with the ending points + weight (float or tensor): the weight for the interpolation formula + +Keyword args: + {out} + +Example:: + + >>> start = torch.arange(1., 5.) + >>> end = torch.empty(4).fill_(10) + >>> start + tensor([ 1., 2., 3., 4.]) + >>> end + tensor([ 10., 10., 10., 10.]) + >>> torch.lerp(start, end, 0.5) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) + >>> torch.lerp(start, end, torch.full_like(start, 0.5)) + tensor([ 5.5000, 6.0000, 6.5000, 7.0000]) +""".format( + **common_args + ), +) + +add_docstr( + torch.lgamma, + r""" +lgamma(input, *, out=None) -> Tensor + +Computes the natural logarithm of the absolute value of the gamma function on :attr:`input`. + +.. math:: + \text{out}_{i} = \ln |\Gamma(\text{input}_{i})| +""" + + """ +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.arange(0.5, 2, 0.5) + >>> torch.lgamma(a) + tensor([ 0.5724, 0.0000, -0.1208]) +""".format( + **common_args + ), +) + +add_docstr( + torch.linspace, + r""" +linspace(start, end, steps, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly +spaced from :attr:`start` to :attr:`end`, inclusive. That is, the value are: + +.. math:: + (\text{start}, + \text{start} + \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \ldots, + \text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{\text{steps} - 1}, + \text{end}) +""" + + """ + +From PyTorch 1.11 linspace requires the steps argument. Use steps=100 to restore the previous behavior. + +Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + +Keyword arguments: + {out} + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + {layout} + {device} + {requires_grad} + + +Example:: + + >>> torch.linspace(3, 10, steps=5) + tensor([ 3.0000, 4.7500, 6.5000, 8.2500, 10.0000]) + >>> torch.linspace(-10, 10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=5) + tensor([-10., -5., 0., 5., 10.]) + >>> torch.linspace(start=-10, end=10, steps=1) + tensor([-10.]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.log, + r""" +log(input, *, out=None) -> Tensor + +Returns a new tensor with the natural logarithm of the elements +of :attr:`input`. + +.. 
math:: + y_{i} = \log_{e} (x_{i}) +""" + + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(5) * 5 + >>> a + tensor([4.7767, 4.3234, 1.2156, 0.2411, 4.5739]) + >>> torch.log(a) + tensor([ 1.5637, 1.4640, 0.1952, -1.4226, 1.5204]) +""".format( + **common_args + ), +) + +add_docstr( + torch.log10, + r""" +log10(input, *, out=None) -> Tensor + +Returns a new tensor with the logarithm to the base 10 of the elements +of :attr:`input`. + +.. math:: + y_{i} = \log_{10} (x_{i}) +""" + + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.5224, 0.9354, 0.7257, 0.1301, 0.2251]) + + + >>> torch.log10(a) + tensor([-0.2820, -0.0290, -0.1392, -0.8857, -0.6476]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.log1p, + r""" +log1p(input, *, out=None) -> Tensor + +Returns a new tensor with the natural logarithm of (1 + :attr:`input`). + +.. math:: + y_i = \log_{e} (x_i + 1) +""" + + r""" +.. note:: This function is more accurate than :func:`torch.log` for small + values of :attr:`input` + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(5) + >>> a + tensor([-1.0090, -0.9923, 1.0249, -0.5372, 0.2492]) + >>> torch.log1p(a) + tensor([ nan, -4.8653, 0.7055, -0.7705, 0.2225]) +""".format( + **common_args + ), +) + +add_docstr( + torch.log2, + r""" +log2(input, *, out=None) -> Tensor + +Returns a new tensor with the logarithm to the base 2 of the elements +of :attr:`input`. + +.. math:: + y_{i} = \log_{2} (x_{i}) +""" + + r""" + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.rand(5) + >>> a + tensor([ 0.8419, 0.8003, 0.9971, 0.5287, 0.0490]) + + + >>> torch.log2(a) + tensor([-0.2483, -0.3213, -0.0042, -0.9196, -4.3504]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.logaddexp, + r""" +logaddexp(input, other, *, out=None) -> Tensor + +Logarithm of the sum of exponentiations of the inputs. + +Calculates pointwise :math:`\log\left(e^x + e^y\right)`. This function is useful +in statistics where the calculated probabilities of events may be so small as to +exceed the range of normal floating point numbers. In such cases the logarithm +of the calculated probability is stored. This function allows adding +probabilities stored in such a fashion. + +This op should be disambiguated with :func:`torch.logsumexp` which performs a +reduction on a single tensor. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword arguments: + {out} + +Example:: + + >>> torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3])) + tensor([-0.3069, -0.6867, -0.8731]) + >>> torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3])) + tensor([-1., -2., -3.]) + >>> torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3])) + tensor([1.1269e+00, 2.0000e+03, 3.0000e+04]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logaddexp2, + r""" +logaddexp2(input, other, *, out=None) -> Tensor + +Logarithm of the sum of exponentiations of the inputs in base-2. + +Calculates pointwise :math:`\log_2\left(2^x + 2^y\right)`. See +:func:`torch.logaddexp` for more details. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword arguments: + {out} +""".format( + **common_args + ), +) + +add_docstr( + torch.xlogy, + r""" +xlogy(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.special.xlogy`. 
+""", +) + +add_docstr( + torch.logical_and, + r""" +logical_and(input, other, *, out=None) -> Tensor + +Computes the element-wise logical AND of the given input tensors. Zeros are treated as ``False`` and nonzeros are +treated as ``True``. + +Args: + {input} + other (Tensor): the tensor to compute AND with + +Keyword args: + {out} + +Example:: + + >>> torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([ True, False, False]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_and(a, b) + tensor([False, False, True, False]) + >>> torch.logical_and(a.double(), b.double()) + tensor([False, False, True, False]) + >>> torch.logical_and(a.double(), b) + tensor([False, False, True, False]) + >>> torch.logical_and(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([False, False, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logical_not, + r""" +logical_not(input, *, out=None) -> Tensor + +Computes the element-wise logical NOT of the given input tensor. If not specified, the output tensor will have the bool +dtype. If the input tensor is not a bool tensor, zeros are treated as ``False`` and non-zeros are treated as ``True``. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> torch.logical_not(torch.tensor([True, False])) + tensor([False, True]) + >>> torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)) + tensor([ True, False, False]) + >>> torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double)) + tensor([ True, False, False]) + >>> torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16)) + tensor([1, 0, 0], dtype=torch.int16) +""".format( + **common_args + ), +) + +add_docstr( + torch.logical_or, + r""" +logical_or(input, other, *, out=None) -> Tensor + +Computes the element-wise logical OR of the given input tensors. Zeros are treated as ``False`` and nonzeros are +treated as ``True``. + +Args: + {input} + other (Tensor): the tensor to compute OR with + +Keyword args: + {out} + +Example:: + + >>> torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([ True, False, True]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_or(a, b) + tensor([ True, True, True, False]) + >>> torch.logical_or(a.double(), b.double()) + tensor([ True, True, True, False]) + >>> torch.logical_or(a.double(), b) + tensor([ True, True, True, False]) + >>> torch.logical_or(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([ True, True, True, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logical_xor, + r""" +logical_xor(input, other, *, out=None) -> Tensor + +Computes the element-wise logical XOR of the given input tensors. Zeros are treated as ``False`` and nonzeros are +treated as ``True``. 
+ +Args: + {input} + other (Tensor): the tensor to compute XOR with + +Keyword args: + {out} + +Example:: + + >>> torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False])) + tensor([False, False, True]) + >>> a = torch.tensor([0, 1, 10, 0], dtype=torch.int8) + >>> b = torch.tensor([4, 0, 1, 0], dtype=torch.int8) + >>> torch.logical_xor(a, b) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a.double(), b.double()) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a.double(), b) + tensor([ True, True, False, False]) + >>> torch.logical_xor(a, b, out=torch.empty(4, dtype=torch.bool)) + tensor([ True, True, False, False]) +""".format( + **common_args + ), +) + +add_docstr( + torch.logspace, + """ +logspace(start, end, steps, base=10.0, *, \ + out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" + +Creates a one-dimensional tensor of size :attr:`steps` whose values are evenly +spaced from :math:`{{\text{{base}}}}^{{\text{{start}}}}` to +:math:`{{\text{{base}}}}^{{\text{{end}}}}`, inclusive, on a logarithmic scale +with base :attr:`base`. That is, the values are: + +.. math:: + (\text{base}^{\text{start}}, + \text{base}^{(\text{start} + \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \ldots, + \text{base}^{(\text{start} + (\text{steps} - 2) * \frac{\text{end} - \text{start}}{ \text{steps} - 1})}, + \text{base}^{\text{end}}) +""" + + """ + + +From PyTorch 1.11 logspace requires the steps argument. Use steps=100 to restore the previous behavior. + +Args: + start (float or Tensor): the starting value for the set of points. If `Tensor`, it must be 0-dimensional + end (float or Tensor): the ending value for the set of points. If `Tensor`, it must be 0-dimensional + steps (int): size of the constructed tensor + base (float, optional): base of the logarithm function. Default: ``10.0``. + +Keyword arguments: + {out} + dtype (torch.dtype, optional): the data type to perform the computation in. + Default: if None, uses the global default dtype (see torch.get_default_dtype()) + when both :attr:`start` and :attr:`end` are real, + and corresponding complex dtype when either is complex. + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.logspace(start=-10, end=10, steps=5) + tensor([ 1.0000e-10, 1.0000e-05, 1.0000e+00, 1.0000e+05, 1.0000e+10]) + >>> torch.logspace(start=0.1, end=1.0, steps=5) + tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000]) + >>> torch.logspace(start=0.1, end=1.0, steps=1) + tensor([1.2589]) + >>> torch.logspace(start=2, end=2, steps=1, base=2) + tensor([4.0]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.logsumexp, + r""" +logsumexp(input, dim, keepdim=False, *, out=None) + +Returns the log of summed exponentials of each row of the :attr:`input` +tensor in the given dimension :attr:`dim`. The computation is numerically +stabilized. + +For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is + + .. 
math:: + \text{{logsumexp}}(x)_{{i}} = \log \sum_j \exp(x_{{ij}}) + +{keepdim_details} + +Args: + {input} + {opt_dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(3, 3) + >>> torch.logsumexp(a, 1) + tensor([1.4907, 1.0593, 1.5696]) + >>> torch.dist(torch.logsumexp(a, 1), torch.log(torch.sum(torch.exp(a), 1))) + tensor(1.6859e-07) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.lt, + r""" +lt(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} < \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is less than :attr:`other` and False elsewhere + +Example:: + + >>> torch.lt(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, False], [True, False]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.lu_unpack, + r""" +lu_unpack(LU_data, LU_pivots, unpack_data=True, unpack_pivots=True, *, out=None) -> (Tensor, Tensor, Tensor) + +Unpacks the LU decomposition returned by :func:`~linalg.lu_factor` into the `P, L, U` matrices. + +.. seealso:: + + :func:`~linalg.lu` returns the matrices from the LU decomposition. Its gradient formula is more efficient + than that of doing :func:`~linalg.lu_factor` followed by :func:`~linalg.lu_unpack`. + +Args: + LU_data (Tensor): the packed LU factorization data + LU_pivots (Tensor): the packed LU factorization pivots + unpack_data (bool): flag indicating if the data should be unpacked. + If ``False``, then the returned ``L`` and ``U`` are empty tensors. + Default: ``True`` + unpack_pivots (bool): flag indicating if the pivots should be unpacked into a permutation matrix ``P``. + If ``False``, then the returned ``P`` is an empty tensor. + Default: ``True`` + +Keyword args: + out (tuple, optional): output tuple of three tensors. Ignored if `None`. + +Returns: + A namedtuple ``(P, L, U)`` + +Examples:: + + >>> A = torch.randn(2, 3, 3) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> P, L, U = torch.lu_unpack(LU, pivots) + >>> # We can recover A from the factorization + >>> A_ = P @ L @ U + >>> torch.allclose(A, A_) + True + + >>> # LU factorization of a rectangular matrix: + >>> A = torch.randn(2, 3, 2) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> P, L, U = torch.lu_unpack(LU, pivots) + >>> # P, L, U are the same as returned by linalg.lu + >>> P_, L_, U_ = torch.linalg.lu(A) + >>> torch.allclose(P, P_) and torch.allclose(L, L_) and torch.allclose(U, U_) + True + +""".format( + **common_args + ), +) + +add_docstr( + torch.less, + r""" +less(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.lt`. +""", +) + +add_docstr( + torch.lu_solve, + r""" +lu_solve(b, LU_data, LU_pivots, *, out=None) -> Tensor + +Returns the LU solve of the linear system :math:`Ax = b` using the partially pivoted +LU factorization of A from :func:`~linalg.lu_factor`. + +This function supports ``float``, ``double``, ``cfloat`` and ``cdouble`` dtypes for :attr:`input`. + +.. warning:: + + :func:`torch.lu_solve` is deprecated in favor of :func:`torch.linalg.lu_solve`. + :func:`torch.lu_solve` will be removed in a future PyTorch release. + ``X = torch.lu_solve(B, LU, pivots)`` should be replaced with + + .. 
code:: python + + X = linalg.lu_solve(LU, pivots, B) + +Arguments: + b (Tensor): the RHS tensor of size :math:`(*, m, k)`, where :math:`*` + is zero or more batch dimensions. + LU_data (Tensor): the pivoted LU factorization of A from :meth:`~linalg.lu_factor` of size :math:`(*, m, m)`, + where :math:`*` is zero or more batch dimensions. + LU_pivots (IntTensor): the pivots of the LU factorization from :meth:`~linalg.lu_factor` of size :math:`(*, m)`, + where :math:`*` is zero or more batch dimensions. + The batch dimensions of :attr:`LU_pivots` must be equal to the batch dimensions of + :attr:`LU_data`. + +Keyword args: + {out} + +Example:: + + >>> A = torch.randn(2, 3, 3) + >>> b = torch.randn(2, 3, 1) + >>> LU, pivots = torch.linalg.lu_factor(A) + >>> x = torch.lu_solve(b, LU, pivots) + >>> torch.dist(A @ x, b) + tensor(1.00000e-07 * + 2.8312) +""".format( + **common_args + ), +) + +add_docstr( + torch.masked_select, + r""" +masked_select(input, mask, *, out=None) -> Tensor + +Returns a new 1-D tensor which indexes the :attr:`input` tensor according to +the boolean mask :attr:`mask` which is a `BoolTensor`. + +The shapes of the :attr:`mask` tensor and the :attr:`input` tensor don't need +to match, but they must be :ref:`broadcastable `. + +.. note:: The returned tensor does **not** use the same storage + as the original tensor + +Args: + {input} + mask (BoolTensor): the tensor containing the binary mask to index with + +Keyword args: + {out} + +Example:: + + >>> x = torch.randn(3, 4) + >>> x + tensor([[ 0.3552, -2.3825, -0.8297, 0.3477], + [-1.2035, 1.2252, 0.5002, 0.6248], + [ 0.1307, -2.0608, 0.1244, 2.0139]]) + >>> mask = x.ge(0.5) + >>> mask + tensor([[False, False, False, False], + [False, True, True, True], + [False, False, False, True]]) + >>> torch.masked_select(x, mask) + tensor([ 1.2252, 0.5002, 0.6248, 2.0139]) +""".format( + **common_args + ), +) + +add_docstr( + torch.matrix_power, + r""" +matrix_power(input, n, *, out=None) -> Tensor + +Alias for :func:`torch.linalg.matrix_power` +""", +) + +add_docstr( + torch.matrix_exp, + r""" +matrix_exp(A) -> Tensor + +Alias for :func:`torch.linalg.matrix_exp`. +""", +) + +add_docstr( + torch.max, + r""" +max(input) -> Tensor + +Returns the maximum value of all elements in the ``input`` tensor. + +.. warning:: + This function produces deterministic (sub)gradients unlike ``max(dim=0)`` + +Args: + {input} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6763, 0.7445, -2.2369]]) + >>> torch.max(a) + tensor(0.7445) + +.. function:: max(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + +Returns a namedtuple ``(values, indices)`` where ``values`` is the maximum +value of each row of the :attr:`input` tensor in the given dimension +:attr:`dim`. And ``indices`` is the index location of each maximum value found +(argmax). + +If ``keepdim`` is ``True``, the output tensors are of the same size +as ``input`` except in the dimension ``dim`` where they are of size 1. +Otherwise, ``dim`` is squeezed (see :func:`torch.squeeze`), resulting +in the output tensors having 1 fewer dimension than ``input``. + +.. note:: If there are multiple maximal values in a reduced row then + the indices of the first maximal value are returned. + +Args: + {input} + {dim} + {keepdim} Default: ``False``. 
+ +Keyword args: + out (tuple, optional): the result tuple of two output tensors (max, max_indices) + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-1.2360, -0.2942, -0.1222, 0.8475], + [ 1.1949, -1.1127, -2.2379, -0.6702], + [ 1.5717, -0.9207, 0.1297, -1.8768], + [-0.6172, 1.0036, -0.6060, -0.2432]]) + >>> torch.max(a, 1) + torch.return_types.max(values=tensor([0.8475, 1.1949, 1.5717, 1.0036]), indices=tensor([3, 0, 0, 1])) + +.. function:: max(input, other, *, out=None) -> Tensor + :noindex: + +See :func:`torch.maximum`. + +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.maximum, + r""" +maximum(input, other, *, out=None) -> Tensor + +Computes the element-wise maximum of :attr:`input` and :attr:`other`. + +.. note:: + If one of the elements being compared is a NaN, then that element is returned. + :func:`maximum` is not supported for tensors with complex dtypes. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor((1, 2, -1)) + >>> b = torch.tensor((3, 0, 4)) + >>> torch.maximum(a, b) + tensor([3, 2, 4]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fmax, + r""" +fmax(input, other, *, out=None) -> Tensor + +Computes the element-wise maximum of :attr:`input` and :attr:`other`. + +This is like :func:`torch.maximum` except it handles NaNs differently: +if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the maximum. +Only if both elements are NaN is NaN propagated. + +This function is a wrapper around C++'s ``std::fmax`` and is similar to NumPy's ``fmax`` function. + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer and floating-point inputs. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([9.7, float('nan'), 3.1, float('nan')]) + >>> b = torch.tensor([-2.2, 0.5, float('nan'), float('nan')]) + >>> torch.fmax(a, b) + tensor([9.7000, 0.5000, 3.1000, nan]) +""".format( + **common_args + ), +) + +add_docstr( + torch.amax, + r""" +amax(input, dim, keepdim=False, *, out=None) -> Tensor + +Returns the maximum value of each slice of the :attr:`input` tensor in the given +dimension(s) :attr:`dim`. + +.. note:: + The difference between ``max``/``min`` and ``amax``/``amin`` is: + - ``amax``/``amin`` supports reducing on multiple dimensions, + - ``amax``/``amin`` does not return indices, + - ``amax``/``amin`` evenly distributes gradient between equal values, + while ``max(dim)``/``min(dim)`` propagates gradient only to a single + index in the source tensor. + +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.8177, 1.4878, -0.2491, 0.9130], + [-0.7158, 1.1775, 2.0992, 0.4817], + [-0.0053, 0.0164, -1.3738, -0.0507], + [ 1.9700, 1.1106, -1.0318, -1.0816]]) + >>> torch.amax(a, 1) + tensor([1.4878, 2.0992, 0.0164, 1.9700]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.argmax, + r""" +argmax(input) -> LongTensor + +Returns the indices of the maximum value of all elements in the :attr:`input` tensor. + +This is the second value returned by :meth:`torch.max`. See its +documentation for the exact semantics of this method. + +.. note:: If there are multiple maximal values then the indices of the first maximal value are returned. 
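+
+For instance (an illustrative sketch), with a tie the index of the first maximal
+value is returned::
+
+    >>> torch.argmax(torch.tensor([1, 3, 3]))
+    tensor(1)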
+ +Args: + {input} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], + [-0.7401, -0.8805, -0.3402, -1.1936], + [ 0.4907, -1.3948, -1.0691, -0.3132], + [-1.6092, 0.5419, -0.2993, 0.3195]]) + >>> torch.argmax(a) + tensor(0) + +.. function:: argmax(input, dim, keepdim=False) -> LongTensor + :noindex: + +Returns the indices of the maximum values of a tensor across a dimension. + +This is the second value returned by :meth:`torch.max`. See its +documentation for the exact semantics of this method. + +Args: + {input} + {dim} If ``None``, the argmax of the flattened input is returned. + {keepdim} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 1.3398, 0.2663, -0.2686, 0.2450], + [-0.7401, -0.8805, -0.3402, -1.1936], + [ 0.4907, -1.3948, -1.0691, -0.3132], + [-1.6092, 0.5419, -0.2993, 0.3195]]) + >>> torch.argmax(a, dim=1) + tensor([ 0, 2, 0, 1]) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.argwhere, + r""" +argwhere(input) -> Tensor + +Returns a tensor containing the indices of all non-zero elements of +:attr:`input`. Each row in the result contains the indices of a non-zero +element in :attr:`input`. The result is sorted lexicographically, with +the last index changing the fastest (C-style). + +If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor +:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of +non-zero elements in the :attr:`input` tensor. + +.. note:: + This function is similar to NumPy's `argwhere`. + + When :attr:`input` is on CUDA, this function causes host-device synchronization. + +Args: + {input} + +Example:: + + >>> t = torch.tensor([1, 0, 1]) + >>> torch.argwhere(t) + tensor([[0], + [2]]) + >>> t = torch.tensor([[1, 0, 1], [0, 1, 1]]) + >>> torch.argwhere(t) + tensor([[0, 0], + [0, 2], + [1, 1], + [1, 2]]) +""", +) + +add_docstr( + torch.mean, + r""" +mean(input, *, dtype=None) -> Tensor + +Returns the mean value of all elements in the :attr:`input` tensor. Input must be floating point or complex. + +Args: + input (Tensor): + the input tensor, either of floating point or complex dtype + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.2294, -0.5481, 1.3288]]) + >>> torch.mean(a) + tensor(0.3367) + +.. function:: mean(input, dim, keepdim=False, *, dtype=None, out=None) -> Tensor + :noindex: + +Returns the mean value of each row of the :attr:`input` tensor in the given +dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, +reduce over all of them. + +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {dtype} + {out} + +.. seealso:: + + :func:`torch.nanmean` computes the mean value of `non-NaN` elements. + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.3841, 0.6320, 0.4254, -0.7384], + [-0.9644, 1.0131, -0.6549, -1.4279], + [-0.2951, -1.3350, -0.7694, 0.5600], + [ 1.0842, -0.9580, 0.3623, 0.2343]]) + >>> torch.mean(a, 1) + tensor([-0.0163, -0.5085, -0.4599, 0.1807]) + >>> torch.mean(a, 1, True) + tensor([[-0.0163], + [-0.5085], + [-0.4599], + [ 0.1807]]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.nanmean, + r""" +nanmean(input, dim=None, keepdim=False, *, dtype=None, out=None) -> Tensor + +Computes the mean of all `non-NaN` elements along the specified dimensions. + +This function is identical to :func:`torch.mean` when there are no `NaN` values +in the :attr:`input` tensor. 
In the presence of `NaN`, :func:`torch.mean` will +propagate the `NaN` to the output whereas :func:`torch.nanmean` will ignore the +`NaN` values (`torch.nanmean(a)` is equivalent to `torch.mean(a[~a.isnan()])`). + +{keepdim_details} + +Args: + {input} + {opt_dim} + {keepdim} + +Keyword args: + {dtype} + {out} + +.. seealso:: + + :func:`torch.mean` computes the mean value, propagating `NaN`. + +Example:: + + >>> x = torch.tensor([[torch.nan, 1, 2], [1, 2, 3]]) + >>> x.mean() + tensor(nan) + >>> x.nanmean() + tensor(1.8000) + >>> x.mean(dim=0) + tensor([ nan, 1.5000, 2.5000]) + >>> x.nanmean(dim=0) + tensor([1.0000, 1.5000, 2.5000]) + + # If all elements in the reduced dimensions are NaN then the result is NaN + >>> torch.tensor([torch.nan]).nanmean() + tensor(nan) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.median, + r""" +median(input) -> Tensor + +Returns the median of the values in :attr:`input`. + +.. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements. In this case the lower of the two medians is returned. To + compute the mean of both medians, use :func:`torch.quantile` with ``q=0.5`` instead. + +.. warning:: + This function produces deterministic (sub)gradients unlike ``median(dim=0)`` + +Args: + {input} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 1.5219, -1.5212, 0.2202]]) + >>> torch.median(a) + tensor(0.2202) + +.. function:: median(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + +Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` +in the dimension :attr:`dim`, and ``indices`` contains the index of the median values found in the dimension :attr:`dim`. + +By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + +If :attr:`keepdim` is ``True``, the output tensors are of the same size +as :attr:`input` except in the dimension :attr:`dim` where they are of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in +the outputs tensor having 1 fewer dimension than :attr:`input`. + +.. note:: + The median is not unique for :attr:`input` tensors with an even number + of elements in the dimension :attr:`dim`. In this case the lower of the + two medians is returned. To compute the mean of both medians in + :attr:`input`, use :func:`torch.quantile` with ``q=0.5`` instead. + +.. warning:: + ``indices`` does not necessarily contain the first occurrence of each + median value found, unless it is unique. + The exact implementation details are device-specific. + Do not expect the same result when run on CPU and GPU in general. + For the same reason do not expect the gradients to be deterministic. + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. 
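A small illustrative check of the even-length rule described in the note above
(the values are assumptions chosen for a hand-checkable result)::

    >>> import torch
    >>> torch.median(torch.tensor([1., 2., 3., 4.]))           # lower of the two middle values
    tensor(2.)
    >>> torch.quantile(torch.tensor([1., 2., 3., 4.]), 0.5)    # mean of the two middle values
    tensor(2.5000)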
+ +Example:: + + >>> a = torch.randn(4, 5) + >>> a + tensor([[ 0.2505, -0.3982, -0.9948, 0.3518, -1.3131], + [ 0.3180, -0.6993, 1.0436, 0.0438, 0.2270], + [-0.2751, 0.7303, 0.2192, 0.3321, 0.2488], + [ 1.0778, -1.9510, 0.7048, 0.4742, -0.7125]]) + >>> torch.median(a, 1) + torch.return_types.median(values=tensor([-0.3982, 0.2270, 0.2488, 0.4742]), indices=tensor([1, 4, 4, 3])) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.nanmedian, + r""" +nanmedian(input) -> Tensor + +Returns the median of the values in :attr:`input`, ignoring ``NaN`` values. + +This function is identical to :func:`torch.median` when there are no ``NaN`` values in :attr:`input`. +When :attr:`input` has one or more ``NaN`` values, :func:`torch.median` will always return ``NaN``, +while this function will return the median of the non-``NaN`` elements in :attr:`input`. +If all the elements in :attr:`input` are ``NaN`` it will also return ``NaN``. + +Args: + {input} + +Example:: + + >>> a = torch.tensor([1, float('nan'), 3, 2]) + >>> a.median() + tensor(nan) + >>> a.nanmedian() + tensor(2.) + +.. function:: nanmedian(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + +Returns a namedtuple ``(values, indices)`` where ``values`` contains the median of each row of :attr:`input` +in the dimension :attr:`dim`, ignoring ``NaN`` values, and ``indices`` contains the index of the median values +found in the dimension :attr:`dim`. + +This function is identical to :func:`torch.median` when there are no ``NaN`` values in a reduced row. When a reduced row has +one or more ``NaN`` values, :func:`torch.median` will always reduce it to ``NaN``, while this function will reduce it to the +median of the non-``NaN`` elements. If all the elements in a reduced row are ``NaN`` then it will be reduced to ``NaN``, too. + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + out ((Tensor, Tensor), optional): The first tensor will be populated with the median values and the second + tensor, which must have dtype long, with their indices in the dimension + :attr:`dim` of :attr:`input`. + +Example:: + + >>> a = torch.tensor([[2, 3, 1], [float('nan'), 1, float('nan')]]) + >>> a + tensor([[2., 3., 1.], + [nan, 1., nan]]) + >>> a.median(0) + torch.return_types.median(values=tensor([nan, 1., nan]), indices=tensor([1, 1, 1])) + >>> a.nanmedian(0) + torch.return_types.nanmedian(values=tensor([2., 1., 1.]), indices=tensor([0, 1, 0])) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.quantile, + r""" +quantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + +Computes the q-th quantiles of each row of the :attr:`input` tensor along the dimension :attr:`dim`. + +To compute the quantile, we map q in [0, 1] to the range of indices [0, n] to find the location +of the quantile in the sorted input. If the quantile lies between two data points ``a < b`` with +indices ``i`` and ``j`` in the sorted order, result is computed according to the given +:attr:`interpolation` method as follows: + +- ``linear``: ``a + (b - a) * fraction``, where ``fraction`` is the fractional part of the computed quantile index. +- ``lower``: ``a``. +- ``higher``: ``b``. +- ``nearest``: ``a`` or ``b``, whichever's index is closer to the computed quantile index (rounding down for .5 fractions). +- ``midpoint``: ``(a + b) / 2``. 
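As a worked illustration (matching the ``torch.arange(4.)`` doctest further below):
for ``q = 0.6`` on the sorted input ``[0., 1., 2., 3.]`` the quantile lands at
fractional index :math:`0.6 \times (4 - 1) = 1.8`, so ``a = 1.``, ``b = 2.`` and
``fraction = 0.8``; ``linear`` therefore gives ``1.8``, ``lower`` gives ``1.``,
``higher`` gives ``2.``, ``midpoint`` gives ``1.5`` and ``nearest`` gives ``2.``.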
+ +If :attr:`q` is a 1D tensor, the first dimension of the output represents the quantiles and has size +equal to the size of :attr:`q`, the remaining dimensions are what remains from the reduction. + +.. note:: + By default :attr:`dim` is ``None`` resulting in the :attr:`input` tensor being flattened before computation. + +Args: + {input} + q (float or Tensor): a scalar or 1D tensor of values in the range [0, 1]. + {dim} + {keepdim} + +Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + {out} + +Example:: + + >>> a = torch.randn(2, 3) + >>> a + tensor([[ 0.0795, -1.2117, 0.9765], + [ 1.1707, 0.6706, 0.4884]]) + >>> q = torch.tensor([0.25, 0.5, 0.75]) + >>> torch.quantile(a, q, dim=1, keepdim=True) + tensor([[[-0.5661], + [ 0.5795]], + + [[ 0.0795], + [ 0.6706]], + + [[ 0.5280], + [ 0.9206]]]) + >>> torch.quantile(a, q, dim=1, keepdim=True).shape + torch.Size([3, 2, 1]) + >>> a = torch.arange(4.) + >>> a + tensor([0., 1., 2., 3.]) + >>> torch.quantile(a, 0.6, interpolation='linear') + tensor(1.8000) + >>> torch.quantile(a, 0.6, interpolation='lower') + tensor(1.) + >>> torch.quantile(a, 0.6, interpolation='higher') + tensor(2.) + >>> torch.quantile(a, 0.6, interpolation='midpoint') + tensor(1.5000) + >>> torch.quantile(a, 0.6, interpolation='nearest') + tensor(2.) + >>> torch.quantile(a, 0.4, interpolation='nearest') + tensor(1.) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.nanquantile, + r""" +nanquantile(input, q, dim=None, keepdim=False, *, interpolation='linear', out=None) -> Tensor + +This is a variant of :func:`torch.quantile` that "ignores" ``NaN`` values, +computing the quantiles :attr:`q` as if ``NaN`` values in :attr:`input` did +not exist. If all values in a reduced row are ``NaN`` then the quantiles for +that reduction will be ``NaN``. See the documentation for :func:`torch.quantile`. + +Args: + {input} + q (float or Tensor): a scalar or 1D tensor of quantile values in the range [0, 1] + {dim} + {keepdim} + +Keyword arguments: + interpolation (str): interpolation method to use when the desired quantile lies between two data points. + Can be ``linear``, ``lower``, ``higher``, ``midpoint`` and ``nearest``. + Default is ``linear``. + {out} + +Example:: + + >>> t = torch.tensor([float('nan'), 1, 2]) + >>> t.quantile(0.5) + tensor(nan) + >>> t.nanquantile(0.5) + tensor(1.5000) + >>> t = torch.tensor([[float('nan'), float('nan')], [1, 2]]) + >>> t + tensor([[nan, nan], + [1., 2.]]) + >>> t.nanquantile(0.5, dim=0) + tensor([1., 2.]) + >>> t.nanquantile(0.5, dim=1) + tensor([ nan, 1.5000]) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.min, + r""" +min(input) -> Tensor + +Returns the minimum value of all elements in the :attr:`input` tensor. + +.. warning:: + This function produces deterministic (sub)gradients unlike ``min(dim=0)`` + +Args: + {input} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.6750, 1.0857, 1.7197]]) + >>> torch.min(a) + tensor(0.6750) + +.. function:: min(input, dim, keepdim=False, *, out=None) -> (Tensor, LongTensor) + :noindex: + +Returns a namedtuple ``(values, indices)`` where ``values`` is the minimum +value of each row of the :attr:`input` tensor in the given dimension +:attr:`dim`. And ``indices`` is the index location of each minimum value found +(argmin). 
+ +If :attr:`keepdim` is ``True``, the output tensors are of the same size as +:attr:`input` except in the dimension :attr:`dim` where they are of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in +the output tensors having 1 fewer dimension than :attr:`input`. + +.. note:: If there are multiple minimal values in a reduced row then + the indices of the first minimal value are returned. + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + out (tuple, optional): the tuple of two output tensors (min, min_indices) + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[-0.6248, 1.1334, -1.1899, -0.2803], + [-1.4644, -0.2635, -0.3651, 0.6134], + [ 0.2457, 0.0384, 1.0128, 0.7015], + [-0.1153, 2.9849, 2.1458, 0.5788]]) + >>> torch.min(a, 1) + torch.return_types.min(values=tensor([-1.1899, -1.4644, 0.0384, -0.1153]), indices=tensor([2, 0, 1, 0])) + +.. function:: min(input, other, *, out=None) -> Tensor + :noindex: + +See :func:`torch.minimum`. +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.minimum, + r""" +minimum(input, other, *, out=None) -> Tensor + +Computes the element-wise minimum of :attr:`input` and :attr:`other`. + +.. note:: + If one of the elements being compared is a NaN, then that element is returned. + :func:`minimum` is not supported for tensors with complex dtypes. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor((1, 2, -1)) + >>> b = torch.tensor((3, 0, 4)) + >>> torch.minimum(a, b) + tensor([1, 0, -1]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fmin, + r""" +fmin(input, other, *, out=None) -> Tensor + +Computes the element-wise minimum of :attr:`input` and :attr:`other`. + +This is like :func:`torch.minimum` except it handles NaNs differently: +if exactly one of the two elements being compared is a NaN then the non-NaN element is taken as the minimum. +Only if both elements are NaN is NaN propagated. + +This function is a wrapper around C++'s ``std::fmin`` and is similar to NumPy's ``fmin`` function. + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer and floating-point inputs. + +Args: + {input} + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([2.2, float('nan'), 2.1, float('nan')]) + >>> b = torch.tensor([-9.3, 0.1, float('nan'), float('nan')]) + >>> torch.fmin(a, b) + tensor([-9.3000, 0.1000, 2.1000, nan]) +""".format( + **common_args + ), +) + +add_docstr( + torch.amin, + r""" +amin(input, dim, keepdim=False, *, out=None) -> Tensor + +Returns the minimum value of each slice of the :attr:`input` tensor in the given +dimension(s) :attr:`dim`. + +.. note:: + The difference between ``max``/``min`` and ``amax``/``amin`` is: + - ``amax``/``amin`` supports reducing on multiple dimensions, + - ``amax``/``amin`` does not return indices, + - ``amax``/``amin`` evenly distributes gradient between equal values, + while ``max(dim)``/``min(dim)`` propagates gradient only to a single + index in the source tensor. 
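A minimal sketch of the gradient rule stated in the note above (the tensor with
a tied minimum is an assumption; the expected gradients follow from that rule)::

    >>> import torch
    >>> x = torch.tensor([1., 1., 2.], requires_grad=True)
    >>> torch.amin(x, dim=0).backward()
    >>> x.grad                                  # gradient split across the tied minima
    tensor([0.5000, 0.5000, 0.0000])
    >>> y = torch.tensor([1., 1., 2.], requires_grad=True)
    >>> torch.min(y, dim=0).values.backward()
    >>> y.grad                                  # only the first minimal index receives it
    tensor([1., 0., 0.])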
+ +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.6451, -0.4866, 0.2987, -1.3312], + [-0.5744, 1.2980, 1.8397, -0.2713], + [ 0.9128, 0.9214, -1.7268, -0.2995], + [ 0.9023, 0.4853, 0.9075, -1.6165]]) + >>> torch.amin(a, 1) + tensor([-1.3312, -0.5744, -1.7268, -1.6165]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.aminmax, + r""" +aminmax(input, *, dim=None, keepdim=False, out=None) -> (Tensor min, Tensor max) + +Computes the minimum and maximum values of the :attr:`input` tensor. + +Args: + input (Tensor): + The input tensor + +Keyword Args: + dim (Optional[int]): + The dimension along which to compute the values. If `None`, + computes the values over the entire :attr:`input` tensor. + Default is `None`. + keepdim (bool): + If `True`, the reduced dimensions will be kept in the output + tensor as dimensions with size 1 for broadcasting, otherwise + they will be removed, as if calling (:func:`torch.squeeze`). + Default is `False`. + out (Optional[Tuple[Tensor, Tensor]]): + Optional tensors on which to write the result. Must have the same + shape and dtype as the expected output. + Default is `None`. + +Returns: + A named tuple `(min, max)` containing the minimum and maximum values. + +Raises: + RuntimeError + If any of the dimensions to compute the values over has size 0. + +.. note:: + NaN values are propagated to the output if at least one value is NaN. + +.. seealso:: + :func:`torch.amin` computes just the minimum value + :func:`torch.amax` computes just the maximum value + +Example:: + + >>> torch.aminmax(torch.tensor([1, -3, 5])) + torch.return_types.aminmax( + min=tensor(-3), + max=tensor(5)) + + >>> # aminmax propagates NaNs + >>> torch.aminmax(torch.tensor([1, -3, 5, torch.nan])) + torch.return_types.aminmax( + min=tensor(nan), + max=tensor(nan)) + + >>> t = torch.arange(10).view(2, 5) + >>> t + tensor([[0, 1, 2, 3, 4], + [5, 6, 7, 8, 9]]) + >>> t.aminmax(dim=0, keepdim=True) + torch.return_types.aminmax( + min=tensor([[0, 1, 2, 3, 4]]), + max=tensor([[5, 6, 7, 8, 9]])) +""", +) + +add_docstr( + torch.argmin, + r""" +argmin(input, dim=None, keepdim=False) -> LongTensor + +Returns the indices of the minimum value(s) of the flattened tensor or along a dimension + +This is the second value returned by :meth:`torch.min`. See its +documentation for the exact semantics of this method. + +.. note:: If there are multiple minimal values then the indices of the first minimal value are returned. + +Args: + {input} + {dim} If ``None``, the argmin of the flattened input is returned. + {keepdim} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.1139, 0.2254, -0.1381, 0.3687], + [ 1.0100, -1.1975, -0.0102, -0.4732], + [-0.9240, 0.1207, -0.7506, -1.0213], + [ 1.7809, -1.2960, 0.9384, 0.1438]]) + >>> torch.argmin(a) + tensor(13) + >>> torch.argmin(a, dim=1) + tensor([ 2, 1, 3, 1]) + >>> torch.argmin(a, dim=1, keepdim=True) + tensor([[2], + [1], + [3], + [1]]) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.mm, + r""" +mm(input, mat2, *, out=None) -> Tensor + +Performs a matrix multiplication of the matrices :attr:`input` and :attr:`mat2`. + +If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`mat2` is a +:math:`(m \times p)` tensor, :attr:`out` will be a :math:`(n \times p)` tensor. + +.. note:: This function does not :ref:`broadcast `. + For broadcasting matrix products, see :func:`torch.matmul`. 
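An illustrative contrast with :func:`torch.matmul` (only shapes matter here, so
the random values are irrelevant)::

    >>> import torch
    >>> batch = torch.randn(10, 3, 4)
    >>> mat = torch.randn(4, 5)
    >>> torch.matmul(batch, mat).shape          # matmul broadcasts over the batch dimension
    torch.Size([10, 3, 5])
    >>> # torch.mm(batch, mat) would raise a RuntimeError, since mm accepts only 2-D inputs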
+ +Supports strided and sparse 2-D tensors as inputs, autograd with +respect to strided inputs. + +This operation has support for arguments with :ref:`sparse layouts`. +If :attr:`out` is provided it's layout will be used. Otherwise, the result +layout will be deduced from that of :attr:`input`. + +{sparse_beta_warning} + +{tf32_note} + +{rocm_fp16_note} + +Args: + input (Tensor): the first matrix to be matrix multiplied + mat2 (Tensor): the second matrix to be matrix multiplied + +Keyword args: + {out} + +Example:: + + >>> mat1 = torch.randn(2, 3) + >>> mat2 = torch.randn(3, 3) + >>> torch.mm(mat1, mat2) + tensor([[ 0.4851, 0.5037, -0.3633], + [-0.0760, -3.6705, 2.4784]]) +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes + ), +) + +add_docstr( + torch.hspmm, + r""" +hspmm(mat1, mat2, *, out=None) -> Tensor + +Performs a matrix multiplication of a :ref:`sparse COO matrix +` :attr:`mat1` and a strided matrix :attr:`mat2`. The +result is a (1 + 1)-dimensional :ref:`hybrid COO matrix +`. + +Args: + mat1 (Tensor): the first sparse matrix to be matrix multiplied + mat2 (Tensor): the second strided matrix to be matrix multiplied + +Keyword args: + {out} +""".format( + **common_args + ), +) + +add_docstr( + torch.matmul, + r""" +matmul(input, other, *, out=None) -> Tensor + +Matrix product of two tensors. + +The behavior depends on the dimensionality of the tensors as follows: + +- If both tensors are 1-dimensional, the dot product (scalar) is returned. +- If both arguments are 2-dimensional, the matrix-matrix product is returned. +- If the first argument is 1-dimensional and the second argument is 2-dimensional, + a 1 is prepended to its dimension for the purpose of the matrix multiply. + After the matrix multiply, the prepended dimension is removed. +- If the first argument is 2-dimensional and the second argument is 1-dimensional, + the matrix-vector product is returned. +- If both arguments are at least 1-dimensional and at least one argument is + N-dimensional (where N > 2), then a batched matrix multiply is returned. If the first + argument is 1-dimensional, a 1 is prepended to its dimension for the purpose of the + batched matrix multiply and removed after. If the second argument is 1-dimensional, a + 1 is appended to its dimension for the purpose of the batched matrix multiple and removed after. + The non-matrix (i.e. batch) dimensions are :ref:`broadcasted ` (and thus + must be broadcastable). For example, if :attr:`input` is a + :math:`(j \times 1 \times n \times n)` tensor and :attr:`other` is a :math:`(k \times n \times n)` + tensor, :attr:`out` will be a :math:`(j \times k \times n \times n)` tensor. + + Note that the broadcasting logic only looks at the batch dimensions when determining if the inputs + are broadcastable, and not the matrix dimensions. For example, if :attr:`input` is a + :math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is a :math:`(k \times m \times p)` + tensor, these inputs are valid for broadcasting even though the final two dimensions (i.e. the + matrix dimensions) are different. :attr:`out` will be a :math:`(j \times k \times n \times p)` tensor. + +This operation has support for arguments with :ref:`sparse layouts`. In particular the +matrix-matrix (both arguments 2-dimensional) supports sparse arguments with the same restrictions +as :func:`torch.mm` + +{sparse_beta_warning} + +{tf32_note} + +{rocm_fp16_note} + +.. 
note:: + + The 1-dimensional dot product version of this function does not support an :attr:`out` parameter. + +Arguments: + input (Tensor): the first tensor to be multiplied + other (Tensor): the second tensor to be multiplied + +Keyword args: + {out} + +Example:: + + >>> # vector x vector + >>> tensor1 = torch.randn(3) + >>> tensor2 = torch.randn(3) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([]) + >>> # matrix x vector + >>> tensor1 = torch.randn(3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([3]) + >>> # batched matrix x broadcasted vector + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3]) + >>> # batched matrix x batched matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(10, 4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + >>> # batched matrix x broadcasted matrix + >>> tensor1 = torch.randn(10, 3, 4) + >>> tensor2 = torch.randn(4, 5) + >>> torch.matmul(tensor1, tensor2).size() + torch.Size([10, 3, 5]) + +""".format( + **common_args, **tf32_notes, **rocm_fp16_notes, **sparse_support_notes + ), +) + +add_docstr( + torch.mode, + r""" +mode(input, dim=-1, keepdim=False, *, out=None) -> (Tensor, LongTensor) + +Returns a namedtuple ``(values, indices)`` where ``values`` is the mode +value of each row of the :attr:`input` tensor in the given dimension +:attr:`dim`, i.e. a value which appears most often +in that row, and ``indices`` is the index location of each mode value found. + +By default, :attr:`dim` is the last dimension of the :attr:`input` tensor. + +If :attr:`keepdim` is ``True``, the output tensors are of the same size as +:attr:`input` except in the dimension :attr:`dim` where they are of size 1. +Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting +in the output tensors having 1 fewer dimension than :attr:`input`. + +.. note:: This function is not defined for ``torch.cuda.Tensor`` yet. + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + out (tuple, optional): the result tuple of two output tensors (values, indices) + +Example:: + + >>> b = torch.tensor( + [[0, 0, 0, 2, 0, 0, 2], + [0, 3, 0, 0, 2, 0, 1], + [2, 2, 2, 0, 0, 0, 3], + [2, 2, 3, 0, 1, 1, 0], + [1, 1, 0, 0, 2, 0, 2]]) + >>> torch.mode(b, 0) + torch.return_types.mode( + values=tensor([0, 2, 0, 0, 0, 0, 2]), + indices=tensor([1, 3, 4, 4, 2, 4, 4])) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.mul, + r""" +mul(input, other, *, out=None) -> Tensor + +Multiplies :attr:`input` by :attr:`other`. + + +.. math:: + \text{out}_i = \text{input}_i \times \text{other}_i +""" + + r""" + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer, float, and complex inputs. + +Args: + {input} + other (Tensor or Number) - the tensor or number to multiply input by. 
+ +Keyword args: + {out} + +Examples:: + + >>> a = torch.randn(3) + >>> a + tensor([ 0.2015, -0.4255, 2.6087]) + >>> torch.mul(a, 100) + tensor([ 20.1494, -42.5491, 260.8663]) + + >>> b = torch.randn(4, 1) + >>> b + tensor([[ 1.1207], + [-0.3137], + [ 0.0700], + [ 0.8378]]) + >>> c = torch.randn(1, 4) + >>> c + tensor([[ 0.5146, 0.1216, -0.5244, 2.2382]]) + >>> torch.mul(b, c) + tensor([[ 0.5767, 0.1363, -0.5877, 2.5083], + [-0.1614, -0.0382, 0.1645, -0.7021], + [ 0.0360, 0.0085, -0.0367, 0.1567], + [ 0.4312, 0.1019, -0.4394, 1.8753]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.multiply, + r""" +multiply(input, other, *, out=None) + +Alias for :func:`torch.mul`. +""", +) + +add_docstr( + torch.multinomial, + r""" +multinomial(input, num_samples, replacement=False, *, generator=None, out=None) -> LongTensor + +Returns a tensor where each row contains :attr:`num_samples` indices sampled +from the multinomial (a stricter definition would be multivariate, +refer to torch.distributions.multinomial.Multinomial for more details) +probability distribution located in the corresponding row +of tensor :attr:`input`. + +.. note:: + The rows of :attr:`input` do not need to sum to one (in which case we use + the values as weights), but must be non-negative, finite and have + a non-zero sum. + +Indices are ordered from left to right according to when each was sampled +(first samples are placed in first column). + +If :attr:`input` is a vector, :attr:`out` is a vector of size :attr:`num_samples`. + +If :attr:`input` is a matrix with `m` rows, :attr:`out` is an matrix of shape +:math:`(m \times \text{{num\_samples}})`. + +If replacement is ``True``, samples are drawn with replacement. + +If not, they are drawn without replacement, which means that when a +sample index is drawn for a row, it cannot be drawn again for that row. + +.. note:: + When drawn without replacement, :attr:`num_samples` must be lower than + number of non-zero elements in :attr:`input` (or the min number of non-zero + elements in each row of :attr:`input` if it is a matrix). + +Args: + input (Tensor): the input tensor containing probabilities + num_samples (int): number of samples to draw + replacement (bool, optional): whether to draw with replacement or not + +Keyword args: + {generator} + {out} + +Example:: + + >>> weights = torch.tensor([0, 10, 3, 0], dtype=torch.float) # create a tensor of weights + >>> torch.multinomial(weights, 2) + tensor([1, 2]) + >>> torch.multinomial(weights, 4) # ERROR! + RuntimeError: invalid argument 2: invalid multinomial distribution (with replacement=False, + not enough non-negative category to sample) at ../aten/src/TH/generic/THTensorRandom.cpp:320 + >>> torch.multinomial(weights, 4, replacement=True) + tensor([ 2, 1, 1, 1]) +""".format( + **common_args + ), +) + +add_docstr( + torch.mv, + r""" +mv(input, vec, *, out=None) -> Tensor + +Performs a matrix-vector product of the matrix :attr:`input` and the vector +:attr:`vec`. + +If :attr:`input` is a :math:`(n \times m)` tensor, :attr:`vec` is a 1-D tensor of +size :math:`m`, :attr:`out` will be 1-D of size :math:`n`. + +.. note:: This function does not :ref:`broadcast `. 
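A hand-checkable sketch (the matrix and vector below are assumptions)::

    >>> import torch
    >>> mat = torch.tensor([[1., 2., 3.],
    ...                     [4., 5., 6.]])
    >>> vec = torch.tensor([1., 0., -1.])
    >>> torch.mv(mat, vec)      # each row dotted with vec: 1 - 3 = -2 and 4 - 6 = -2
    tensor([-2., -2.])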
+ +Args: + input (Tensor): matrix to be multiplied + vec (Tensor): vector to be multiplied + +Keyword args: + {out} + +Example:: + + >>> mat = torch.randn(2, 3) + >>> vec = torch.randn(3) + >>> torch.mv(mat, vec) + tensor([ 1.0404, -0.6361]) +""".format( + **common_args + ), +) + +add_docstr( + torch.mvlgamma, + r""" +mvlgamma(input, p, *, out=None) -> Tensor + +Alias for :func:`torch.special.multigammaln`. +""", +) + +add_docstr( + torch.movedim, + r""" +movedim(input, source, destination) -> Tensor + +Moves the dimension(s) of :attr:`input` at the position(s) in :attr:`source` +to the position(s) in :attr:`destination`. + +Other dimensions of :attr:`input` that are not explicitly moved remain in +their original order and appear at the positions not specified in :attr:`destination`. + +Args: + {input} + source (int or tuple of ints): Original positions of the dims to move. These must be unique. + destination (int or tuple of ints): Destination positions for each of the original dims. These must also be unique. + +Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.movedim(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.movedim(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.movedim(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.movedim(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.moveaxis, + r""" +moveaxis(input, source, destination) -> Tensor + +Alias for :func:`torch.movedim`. + +This function is equivalent to NumPy's moveaxis function. + +Examples:: + + >>> t = torch.randn(3,2,1) + >>> t + tensor([[[-0.3362], + [-0.8437]], + + [[-0.9627], + [ 0.1727]], + + [[ 0.5173], + [-0.1398]]]) + >>> torch.moveaxis(t, 1, 0).shape + torch.Size([2, 3, 1]) + >>> torch.moveaxis(t, 1, 0) + tensor([[[-0.3362], + [-0.9627], + [ 0.5173]], + + [[-0.8437], + [ 0.1727], + [-0.1398]]]) + >>> torch.moveaxis(t, (1, 2), (0, 1)).shape + torch.Size([2, 1, 3]) + >>> torch.moveaxis(t, (1, 2), (0, 1)) + tensor([[[-0.3362, -0.9627, 0.5173]], + + [[-0.8437, 0.1727, -0.1398]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.swapdims, + r""" +swapdims(input, dim0, dim1) -> Tensor + +Alias for :func:`torch.transpose`. + +This function is equivalent to NumPy's swapaxes function. + +Examples:: + + >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.swapdims(x, 0, 1) + tensor([[[0, 1], + [4, 5]], + + [[2, 3], + [6, 7]]]) + >>> torch.swapdims(x, 0, 2) + tensor([[[0, 4], + [2, 6]], + + [[1, 5], + [3, 7]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.swapaxes, + r""" +swapaxes(input, axis0, axis1) -> Tensor + +Alias for :func:`torch.transpose`. + +This function is equivalent to NumPy's swapaxes function. + +Examples:: + + >>> x = torch.tensor([[[0,1],[2,3]],[[4,5],[6,7]]]) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.swapaxes(x, 0, 1) + tensor([[[0, 1], + [4, 5]], + + [[2, 3], + [6, 7]]]) + >>> torch.swapaxes(x, 0, 2) + tensor([[[0, 4], + [2, 6]], + + [[1, 5], + [3, 7]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.narrow, + r""" +narrow(input, dim, start, length) -> Tensor + +Returns a new tensor that is a narrowed version of :attr:`input` tensor. 
The +dimension :attr:`dim` is input from :attr:`start` to ``start + length``. The +returned tensor and :attr:`input` tensor share the same underlying storage. + +Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int or Tensor): index of the element to start the narrowed dimension + from. Can be negative, which means indexing from the end of `dim`. If + `Tensor`, it must be an 0-dim integral `Tensor` (bools not allowed) + length (int): length of the narrowed dimension, must be weakly positive + +Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> torch.narrow(x, -1, torch.tensor(-1), 1) + tensor([[3], + [6], + [9]]) +""", +) + +add_docstr( + torch.narrow_copy, + r""" +narrow_copy(input, dim, start, length, *, out=None) -> Tensor + +Same as :meth:`Tensor.narrow` except this returns a copy rather +than shared storage. This is primarily for sparse tensors, which +do not have a shared-storage narrow method. + +Args: + input (Tensor): the tensor to narrow + dim (int): the dimension along which to narrow + start (int): index of the element to start the narrowed dimension from. Can + be negative, which means indexing from the end of `dim` + length (int): length of the narrowed dimension, must be weakly positive + +Keyword args: + {out} + +Example:: + + >>> x = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> torch.narrow_copy(x, 0, 0, 2) + tensor([[ 1, 2, 3], + [ 4, 5, 6]]) + >>> torch.narrow_copy(x, 1, 1, 2) + tensor([[ 2, 3], + [ 5, 6], + [ 8, 9]]) + >>> s = torch.arange(16).reshape(2, 2, 2, 2).to_sparse(2) + >>> torch.narrow_copy(s, 0, 0, 1) + tensor(indices=tensor([[0, 0], + [0, 1]]), + values=tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]), + size=(1, 2, 2, 2), nnz=2, layout=torch.sparse_coo) + +.. seealso:: + + :func:`torch.narrow` for a non copy variant + +""".format( + **common_args + ), +) + +add_docstr( + torch.nan_to_num, + r""" +nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None) -> Tensor + +Replaces :literal:`NaN`, positive infinity, and negative infinity values in :attr:`input` +with the values specified by :attr:`nan`, :attr:`posinf`, and :attr:`neginf`, respectively. +By default, :literal:`NaN`\ s are replaced with zero, positive infinity is replaced with the +greatest finite value representable by :attr:`input`'s dtype, and negative infinity +is replaced with the least finite value representable by :attr:`input`'s dtype. + +Args: + {input} + nan (Number, optional): the value to replace :literal:`NaN`\s with. Default is zero. + posinf (Number, optional): if a Number, the value to replace positive infinity values with. + If None, positive infinity values are replaced with the greatest finite value representable by :attr:`input`'s dtype. + Default is None. + neginf (Number, optional): if a Number, the value to replace negative infinity values with. + If None, negative infinity values are replaced with the lowest finite value representable by :attr:`input`'s dtype. + Default is None. 
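Because the default replacements track the input dtype, a ``float16`` input is
clipped to the ``float16`` range; a minimal sketch (``65504`` is the largest
finite ``float16`` value)::

    >>> import torch
    >>> x16 = torch.tensor([float('nan'), float('inf')], dtype=torch.float16)
    >>> torch.nan_to_num(x16).tolist()      # NaN -> 0.0, inf -> float16 max
    [0.0, 65504.0]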
+ +Keyword args: + {out} + +Example:: + + >>> x = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14]) + >>> torch.nan_to_num(x) + tensor([ 0.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) + >>> torch.nan_to_num(x, nan=2.0) + tensor([ 2.0000e+00, 3.4028e+38, -3.4028e+38, 3.1400e+00]) + >>> torch.nan_to_num(x, nan=2.0, posinf=1.0) + tensor([ 2.0000e+00, 1.0000e+00, -3.4028e+38, 3.1400e+00]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.ne, + r""" +ne(input, other, *, out=None) -> Tensor + +Computes :math:`\text{input} \neq \text{other}` element-wise. +""" + + r""" + +The second argument can be a number or a tensor whose shape is +:ref:`broadcastable ` with the first argument. + +Args: + input (Tensor): the tensor to compare + other (Tensor or float): the tensor or value to compare + +Keyword args: + {out} + +Returns: + A boolean tensor that is True where :attr:`input` is not equal to :attr:`other` and False elsewhere + +Example:: + + >>> torch.ne(torch.tensor([[1, 2], [3, 4]]), torch.tensor([[1, 1], [4, 4]])) + tensor([[False, True], [True, False]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.not_equal, + r""" +not_equal(input, other, *, out=None) -> Tensor + +Alias for :func:`torch.ne`. +""", +) + +add_docstr( + torch.neg, + r""" +neg(input, *, out=None) -> Tensor + +Returns a new tensor with the negative of the elements of :attr:`input`. + +.. math:: + \text{out} = -1 \times \text{input} +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(5) + >>> a + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + >>> torch.neg(a) + tensor([-0.0090, 0.2262, 0.0682, 0.2866, -0.3940]) +""".format( + **common_args + ), +) + +add_docstr( + torch.negative, + r""" +negative(input, *, out=None) -> Tensor + +Alias for :func:`torch.neg` +""", +) + +add_docstr( + torch.nextafter, + r""" +nextafter(input, other, *, out=None) -> Tensor + +Return the next floating-point value after :attr:`input` towards :attr:`other`, elementwise. + +The shapes of ``input`` and ``other`` must be +:ref:`broadcastable `. + +Args: + input (Tensor): the first input tensor + other (Tensor): the second input tensor + +Keyword args: + {out} + +Example:: + + >>> eps = torch.finfo(torch.float32).eps + >>> torch.nextafter(torch.tensor([1.0, 2.0]), torch.tensor([2.0, 1.0])) == torch.tensor([eps + 1, 2 - eps]) + tensor([True, True]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.nonzero, + r""" +nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors + +.. note:: + :func:`torch.nonzero(..., as_tuple=False) ` (default) returns a + 2-D tensor where each row is the index for a nonzero value. + + :func:`torch.nonzero(..., as_tuple=True) ` returns a tuple of 1-D + index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]`` + gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor + contains nonzero indices for a certain dimension. + + See below for more details on the two behaviors. + + When :attr:`input` is on CUDA, :func:`torch.nonzero() ` causes + host-device synchronization. + +**When** :attr:`as_tuple` **is** ``False`` **(default)**: + +Returns a tensor containing the indices of all non-zero elements of +:attr:`input`. Each row in the result contains the indices of a non-zero +element in :attr:`input`. The result is sorted lexicographically, with +the last index changing the fastest (C-style). 
+ +If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor +:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of +non-zero elements in the :attr:`input` tensor. + +**When** :attr:`as_tuple` **is** ``True``: + +Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`, +each containing the indices (in that dimension) of all non-zero elements of +:attr:`input` . + +If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n` +tensors of size :math:`z`, where :math:`z` is the total number of +non-zero elements in the :attr:`input` tensor. + +As a special case, when :attr:`input` has zero dimensions and a nonzero scalar +value, it is treated as a one-dimensional tensor with one element. + +Args: + {input} + +Keyword args: + out (LongTensor, optional): the output tensor containing indices + +Returns: + LongTensor or tuple of LongTensor: If :attr:`as_tuple` is ``False``, the output + tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for + each dimension, containing the indices of each nonzero element along that + dimension. + +Example:: + + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1])) + tensor([[ 0], + [ 1], + [ 2], + [ 4]]) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]])) + tensor([[ 0, 0], + [ 1, 1], + [ 2, 2], + [ 3, 3]]) + >>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True) + (tensor([0, 1, 2, 4]),) + >>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0], + ... [0.0, 0.4, 0.0, 0.0], + ... [0.0, 0.0, 1.2, 0.0], + ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True) + (tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3])) + >>> torch.nonzero(torch.tensor(5), as_tuple=True) + (tensor([0]),) +""".format( + **common_args + ), +) + +add_docstr( + torch.normal, + r""" +normal(mean, std, *, generator=None, out=None) -> Tensor + +Returns a tensor of random numbers drawn from separate normal distributions +whose mean and standard deviation are given. + +The :attr:`mean` is a tensor with the mean of +each output element's normal distribution + +The :attr:`std` is a tensor with the standard deviation of +each output element's normal distribution + +The shapes of :attr:`mean` and :attr:`std` don't need to match, but the +total number of elements in each tensor need to be the same. + +.. note:: When the shapes do not match, the shape of :attr:`mean` + is used as the shape for the returned output tensor + +.. note:: When :attr:`std` is a CUDA tensor, this function synchronizes + its device with the CPU. + +Args: + mean (Tensor): the tensor of per-element means + std (Tensor): the tensor of per-element standard deviations + +Keyword args: + {generator} + {out} + +Example:: + + >>> torch.normal(mean=torch.arange(1., 11.), std=torch.arange(1, 0, -0.1)) + tensor([ 1.0425, 3.5672, 2.7969, 4.2925, 4.7229, 6.2134, + 8.0505, 8.1408, 9.0563, 10.0566]) + +.. function:: normal(mean=0.0, std, *, out=None) -> Tensor + :noindex: + +Similar to the function above, but the means are shared among all drawn +elements. + +Args: + mean (float, optional): the mean for all distributions + std (Tensor): the tensor of per-element standard deviations + +Keyword args: + {out} + +Example:: + + >>> torch.normal(mean=0.5, std=torch.arange(1., 6.)) + tensor([-1.2793, -1.0732, -2.0687, 5.1177, -1.2303]) + +.. 
function:: normal(mean, std=1.0, *, out=None) -> Tensor + :noindex: + +Similar to the function above, but the standard deviations are shared among +all drawn elements. + +Args: + mean (Tensor): the tensor of per-element means + std (float, optional): the standard deviation for all distributions + +Keyword args: + out (Tensor, optional): the output tensor + +Example:: + + >>> torch.normal(mean=torch.arange(1., 6.)) + tensor([ 1.1552, 2.6148, 2.6535, 5.8318, 4.2361]) + +.. function:: normal(mean, std, size, *, out=None) -> Tensor + :noindex: + +Similar to the function above, but the means and standard deviations are shared +among all drawn elements. The resulting tensor has size given by :attr:`size`. + +Args: + mean (float): the mean for all distributions + std (float): the standard deviation for all distributions + size (int...): a sequence of integers defining the shape of the output tensor. + +Keyword args: + {out} + +Example:: + + >>> torch.normal(2, 3, size=(1, 4)) + tensor([[-1.3987, -1.9544, 3.6048, 0.7909]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.numel, + r""" +numel(input) -> int + +Returns the total number of elements in the :attr:`input` tensor. + +Args: + {input} + +Example:: + + >>> a = torch.randn(1, 2, 3, 4, 5) + >>> torch.numel(a) + 120 + >>> a = torch.zeros(4,4) + >>> torch.numel(a) + 16 + +""".format( + **common_args + ), +) + +add_docstr( + torch.ones, + r""" +ones(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a tensor filled with the scalar value `1`, with the shape defined +by the variable argument :attr:`size`. + +Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + +Keyword arguments: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.ones(2, 3) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) + + >>> torch.ones(5) + tensor([ 1., 1., 1., 1., 1.]) + +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.ones_like, + r""" +ones_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns a tensor filled with the scalar value `1`, with the same size as +:attr:`input`. ``torch.ones_like(input)`` is equivalent to +``torch.ones(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +.. warning:: + As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, + the old ``torch.ones_like(input, out=output)`` is equivalent to + ``torch.ones(input.size(), out=output)``. + +Args: + {input} + +Keyword arguments: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +Example:: + + >>> input = torch.empty(2, 3) + >>> torch.ones_like(input) + tensor([[ 1., 1., 1.], + [ 1., 1., 1.]]) +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.orgqr, + r""" +orgqr(input, tau) -> Tensor + +Alias for :func:`torch.linalg.householder_product`. +""", +) + +add_docstr( + torch.ormqr, + r""" +ormqr(input, tau, other, left=True, transpose=False, *, out=None) -> Tensor + +Computes the matrix-matrix multiplication of a product of Householder matrices with a general matrix. + +Multiplies a :math:`m \times n` matrix `C` (given by :attr:`other`) with a matrix `Q`, +where `Q` is represented using Householder reflectors `(input, tau)`. +See `Representation of Orthogonal or Unitary Matrices`_ for further details. 
+ +If :attr:`left` is `True` then `op(Q)` times `C` is computed, otherwise the result is `C` times `op(Q)`. +When :attr:`left` is `True`, the implicit matrix `Q` has size :math:`m \times m`. +It has size :math:`n \times n` otherwise. +If :attr:`transpose` is `True` then `op` is the conjugate transpose operation, otherwise it's a no-op. + +Supports inputs of float, double, cfloat and cdouble dtypes. +Also supports batched inputs, and, if the input is batched, the output is batched with the same dimensions. + +.. seealso:: + :func:`torch.geqrf` can be used to form the Householder representation `(input, tau)` of matrix `Q` + from the QR decomposition. + +.. note:: + This function supports backward but it is only fast when ``(input, tau)`` do not require gradients + and/or ``tau.size(-1)`` is very small. + `` + +Args: + input (Tensor): tensor of shape `(*, mn, k)` where `*` is zero or more batch dimensions + and `mn` equals to `m` or `n` depending on the :attr:`left`. + tau (Tensor): tensor of shape `(*, min(mn, k))` where `*` is zero or more batch dimensions. + other (Tensor): tensor of shape `(*, m, n)` where `*` is zero or more batch dimensions. + left (bool): controls the order of multiplication. + transpose (bool): controls whether the matrix `Q` is conjugate transposed or not. + +Keyword args: + out (Tensor, optional): the output Tensor. Ignored if `None`. Default: `None`. + +.. _Representation of Orthogonal or Unitary Matrices: + https://www.netlib.org/lapack/lug/node128.html +""", +) + +add_docstr( + torch.permute, + r""" +permute(input, dims) -> Tensor + +Returns a view of the original tensor :attr:`input` with its dimensions permuted. + +Args: + {input} + dims (tuple of int): The desired ordering of dimensions + +Example: + >>> x = torch.randn(2, 3, 5) + >>> x.size() + torch.Size([2, 3, 5]) + >>> torch.permute(x, (2, 0, 1)).size() + torch.Size([5, 2, 3]) +""".format( + **common_args + ), +) + +add_docstr( + torch.poisson, + r""" +poisson(input, generator=None) -> Tensor + +Returns a tensor of the same size as :attr:`input` with each element +sampled from a Poisson distribution with rate parameter given by the corresponding +element in :attr:`input` i.e., + +.. math:: + \text{{out}}_i \sim \text{{Poisson}}(\text{{input}}_i) + +:attr:`input` must be non-negative. + +Args: + input (Tensor): the input tensor containing the rates of the Poisson distribution + +Keyword args: + {generator} + +Example:: + + >>> rates = torch.rand(4, 4) * 5 # rate parameter between 0 and 5 + >>> torch.poisson(rates) + tensor([[9., 1., 3., 5.], + [8., 6., 6., 0.], + [0., 4., 5., 3.], + [2., 1., 4., 2.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.polygamma, + r""" +polygamma(n, input, *, out=None) -> Tensor + +Alias for :func:`torch.special.polygamma`. +""", +) + +add_docstr( + torch.positive, + r""" +positive(input) -> Tensor + +Returns :attr:`input`. +Throws a runtime error if :attr:`input` is a bool tensor. +""" + + r""" +Args: + {input} + +Example:: + + >>> t = torch.randn(5) + >>> t + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) + >>> torch.positive(t) + tensor([ 0.0090, -0.2262, -0.0682, -0.2866, 0.3940]) +""".format( + **common_args + ), +) + +add_docstr( + torch.pow, + r""" +pow(input, exponent, *, out=None) -> Tensor + +Takes the power of each element in :attr:`input` with :attr:`exponent` and +returns a tensor with the result. + +:attr:`exponent` can be either a single ``float`` number or a `Tensor` +with the same number of elements as :attr:`input`. 
+ +When :attr:`exponent` is a scalar value, the operation applied is: + +.. math:: + \text{out}_i = x_i ^ \text{exponent} + +When :attr:`exponent` is a tensor, the operation applied is: + +.. math:: + \text{out}_i = x_i ^ {\text{exponent}_i} +""" + + r""" +When :attr:`exponent` is a tensor, the shapes of :attr:`input` +and :attr:`exponent` must be :ref:`broadcastable `. + +Args: + {input} + exponent (float or tensor): the exponent value + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.4331, 1.2475, 0.6834, -0.2791]) + >>> torch.pow(a, 2) + tensor([ 0.1875, 1.5561, 0.4670, 0.0779]) + >>> exp = torch.arange(1., 5.) + + >>> a = torch.arange(1., 5.) + >>> a + tensor([ 1., 2., 3., 4.]) + >>> exp + tensor([ 1., 2., 3., 4.]) + >>> torch.pow(a, exp) + tensor([ 1., 4., 27., 256.]) + +.. function:: pow(self, exponent, *, out=None) -> Tensor + :noindex: + +:attr:`self` is a scalar ``float`` value, and :attr:`exponent` is a tensor. +The returned tensor :attr:`out` is of the same shape as :attr:`exponent` + +The operation applied is: + +.. math:: + \text{{out}}_i = \text{{self}} ^ {{\text{{exponent}}_i}} + +Args: + self (float): the scalar base value for the power operation + exponent (Tensor): the exponent tensor + +Keyword args: + {out} + +Example:: + + >>> exp = torch.arange(1., 5.) + >>> base = 2 + >>> torch.pow(base, exp) + tensor([ 2., 4., 8., 16.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.float_power, + r""" +float_power(input, exponent, *, out=None) -> Tensor + +Raises :attr:`input` to the power of :attr:`exponent`, elementwise, in double precision. +If neither input is complex returns a ``torch.float64`` tensor, +and if one or more inputs is complex returns a ``torch.complex128`` tensor. + +.. note:: + This function always computes in double precision, unlike :func:`torch.pow`, + which implements more typical :ref:`type promotion `. + This is useful when the computation needs to be performed in a wider or more precise dtype, + or the results of the computation may contain fractional values not representable in the input dtypes, + like when an integer base is raised to a negative integer exponent. + +Args: + input (Tensor or Number): the base value(s) + exponent (Tensor or Number): the exponent value(s) + +Keyword args: + {out} + +Example:: + + >>> a = torch.randint(10, (4,)) + >>> a + tensor([6, 4, 7, 1]) + >>> torch.float_power(a, 2) + tensor([36., 16., 49., 1.], dtype=torch.float64) + + >>> a = torch.arange(1, 5) + >>> a + tensor([ 1, 2, 3, 4]) + >>> exp = torch.tensor([2, -3, 4, -5]) + >>> exp + tensor([ 2, -3, 4, -5]) + >>> torch.float_power(a, exp) + tensor([1.0000e+00, 1.2500e-01, 8.1000e+01, 9.7656e-04], dtype=torch.float64) +""".format( + **common_args + ), +) + +add_docstr( + torch.prod, + r""" +prod(input, *, dtype=None) -> Tensor + +Returns the product of all elements in the :attr:`input` tensor. + +Args: + {input} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[-0.8020, 0.5428, -1.5854]]) + >>> torch.prod(a) + tensor(0.6902) + +.. function:: prod(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + +Returns the product of each row of the :attr:`input` tensor in the given +dimension :attr:`dim`. 
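A hand-checkable sketch (the integer tensor is an assumption; ``dtype`` is passed
to accumulate in a wider type)::

    >>> import torch
    >>> a = torch.tensor([[2, 3], [4, 5]])
    >>> torch.prod(a, 1)
    tensor([ 6, 20])
    >>> torch.prod(a, 1, dtype=torch.float64)
    tensor([ 6., 20.], dtype=torch.float64)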
+ +{keepdim_details} + +Args: + {input} + {dim} + {keepdim} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(4, 2) + >>> a + tensor([[ 0.5261, -0.3837], + [ 1.1857, -0.2498], + [-1.1646, 0.0705], + [ 1.1131, -1.0629]]) + >>> torch.prod(a, 1) + tensor([-0.2018, -0.2962, -0.0821, -1.1831]) +""".format( + **single_dim_common + ), +) + +add_docstr( + torch.promote_types, + r""" +promote_types(type1, type2) -> dtype + +Returns the :class:`torch.dtype` with the smallest size and scalar kind that is +not smaller nor of lower kind than either `type1` or `type2`. See type promotion +:ref:`documentation ` for more information on the type +promotion logic. + +Args: + type1 (:class:`torch.dtype`) + type2 (:class:`torch.dtype`) + +Example:: + + >>> torch.promote_types(torch.int32, torch.float32) + torch.float32 + >>> torch.promote_types(torch.uint8, torch.long) + torch.long +""", +) + +add_docstr( + torch.qr, + r""" +qr(input, some=True, *, out=None) -> (Tensor, Tensor) + +Computes the QR decomposition of a matrix or a batch of matrices :attr:`input`, +and returns a namedtuple (Q, R) of tensors such that :math:`\text{input} = Q R` +with :math:`Q` being an orthogonal matrix or batch of orthogonal matrices and +:math:`R` being an upper triangular matrix or batch of upper triangular matrices. + +If :attr:`some` is ``True``, then this function returns the thin (reduced) QR factorization. +Otherwise, if :attr:`some` is ``False``, this function returns the complete QR factorization. + +.. warning:: + + :func:`torch.qr` is deprecated in favor of :func:`torch.linalg.qr` + and will be removed in a future PyTorch release. The boolean parameter :attr:`some` has been + replaced with a string parameter :attr:`mode`. + + ``Q, R = torch.qr(A)`` should be replaced with + + .. code:: python + + Q, R = torch.linalg.qr(A) + + ``Q, R = torch.qr(A, some=False)`` should be replaced with + + .. code:: python + + Q, R = torch.linalg.qr(A, mode="complete") + +.. warning:: + If you plan to backpropagate through QR, note that the current backward implementation + is only well-defined when the first :math:`\min(input.size(-1), input.size(-2))` + columns of :attr:`input` are linearly independent. + This behavior will probably change once QR supports pivoting. + +.. note:: This function uses LAPACK for CPU inputs and MAGMA for CUDA inputs, + and may produce different (valid) decompositions on different device types + or different platforms. + +Args: + input (Tensor): the input tensor of size :math:`(*, m, n)` where `*` is zero or more + batch dimensions consisting of matrices of dimension :math:`m \times n`. + some (bool, optional): Set to ``True`` for reduced QR decomposition and ``False`` for + complete QR decomposition. If `k = min(m, n)` then: + + * ``some=True`` : returns `(Q, R)` with dimensions (m, k), (k, n) (default) + + * ``'some=False'``: returns `(Q, R)` with dimensions (m, m), (m, n) + +Keyword args: + out (tuple, optional): tuple of `Q` and `R` tensors. + The dimensions of `Q` and `R` are detailed in the description of :attr:`some` above. 
+ +Example:: + + >>> a = torch.tensor([[12., -51, 4], [6, 167, -68], [-4, 24, -41]]) + >>> q, r = torch.qr(a) + >>> q + tensor([[-0.8571, 0.3943, 0.3314], + [-0.4286, -0.9029, -0.0343], + [ 0.2857, -0.1714, 0.9429]]) + >>> r + tensor([[ -14.0000, -21.0000, 14.0000], + [ 0.0000, -175.0000, 70.0000], + [ 0.0000, 0.0000, -35.0000]]) + >>> torch.mm(q, r).round() + tensor([[ 12., -51., 4.], + [ 6., 167., -68.], + [ -4., 24., -41.]]) + >>> torch.mm(q.t(), q).round() + tensor([[ 1., 0., 0.], + [ 0., 1., -0.], + [ 0., -0., 1.]]) + >>> a = torch.randn(3, 4, 5) + >>> q, r = torch.qr(a, some=False) + >>> torch.allclose(torch.matmul(q, r), a) + True + >>> torch.allclose(torch.matmul(q.mT, q), torch.eye(5)) + True +""", +) + +add_docstr( + torch.rad2deg, + r""" +rad2deg(input, *, out=None) -> Tensor + +Returns a new tensor with each of the elements of :attr:`input` +converted from angles in radians to degrees. + +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]) + >>> torch.rad2deg(a) + tensor([[ 180.0233, -180.0233], + [ 359.9894, -359.9894], + [ 89.9544, -89.9544]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.deg2rad, + r""" +deg2rad(input, *, out=None) -> Tensor + +Returns a new tensor with each of the elements of :attr:`input` +converted from angles in degrees to radians. + +Args: + {input} + +Keyword arguments: + {out} + +Example:: + + >>> a = torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]]) + >>> torch.deg2rad(a) + tensor([[ 3.1416, -3.1416], + [ 6.2832, -6.2832], + [ 1.5708, -1.5708]]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.heaviside, + r""" +heaviside(input, values, *, out=None) -> Tensor + +Computes the Heaviside step function for each element in :attr:`input`. +The Heaviside step function is defined as: + +.. math:: + \text{{heaviside}}(input, values) = \begin{cases} + 0, & \text{if input < 0}\\ + values, & \text{if input == 0}\\ + 1, & \text{if input > 0} + \end{cases} +""" + + r""" + +Args: + {input} + values (Tensor): The values to use where :attr:`input` is zero. + +Keyword arguments: + {out} + +Example:: + + >>> input = torch.tensor([-1.5, 0, 2.0]) + >>> values = torch.tensor([0.5]) + >>> torch.heaviside(input, values) + tensor([0.0000, 0.5000, 1.0000]) + >>> values = torch.tensor([1.2, -2.0, 3.5]) + >>> torch.heaviside(input, values) + tensor([0., -2., 1.]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.rand, + """ +rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, \ +requires_grad=False, pin_memory=False) -> Tensor +""" + + r""" +Returns a tensor filled with random numbers from a uniform distribution +on the interval :math:`[0, 1)` + +The shape of the tensor is defined by the variable argument :attr:`size`. + +Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. 
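A minimal sketch of reproducible sampling through the ``generator`` keyword
documented below (the seed value is an arbitrary assumption)::

    >>> import torch
    >>> g1 = torch.Generator().manual_seed(42)
    >>> g2 = torch.Generator().manual_seed(42)
    >>> torch.equal(torch.rand(3, generator=g1), torch.rand(3, generator=g2))
    True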
+ +Keyword args: + {generator} + {out} + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + +Example:: + + >>> torch.rand(4) + tensor([ 0.5204, 0.2503, 0.3525, 0.5673]) + >>> torch.rand(2, 3) + tensor([[ 0.8237, 0.5781, 0.6879], + [ 0.3816, 0.7249, 0.0998]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.rand_like, + r""" +rand_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns a tensor with the same size as :attr:`input` that is filled with +random numbers from a uniform distribution on the interval :math:`[0, 1)`. +``torch.rand_like(input)`` is equivalent to +``torch.rand(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +Args: + {input} + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.randint, + """ +randint(low=0, high, size, \\*, generator=None, out=None, \ +dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a tensor filled with random integers generated uniformly +between :attr:`low` (inclusive) and :attr:`high` (exclusive). + +The shape of the tensor is defined by the variable argument :attr:`size`. + +.. note:: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + +Args: + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + size (tuple): a tuple defining the shape of the output tensor. + +Keyword args: + {generator} + {out} + dtype (`torch.dtype`, optional) - the desired data type of returned tensor. Default: if ``None``, + this function returns a tensor with dtype ``torch.int64``. + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.randint(3, 5, (3,)) + tensor([4, 3, 4]) + + + >>> torch.randint(10, (2, 2)) + tensor([[0, 2], + [5, 5]]) + + + >>> torch.randint(3, 10, (2, 2)) + tensor([[4, 5], + [6, 7]]) + + +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.randint_like, + """ +randint_like(input, low=0, high, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \ +memory_format=torch.preserve_format) -> Tensor + +Returns a tensor with the same shape as Tensor :attr:`input` filled with +random integers generated uniformly between :attr:`low` (inclusive) and +:attr:`high` (exclusive). + +.. note: + With the global dtype default (``torch.float32``), this function returns + a tensor with dtype ``torch.int64``. + +Args: + {input} + low (int, optional): Lowest integer to be drawn from the distribution. Default: 0. + high (int): One above the highest integer to be drawn from the distribution. + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.randn, + """ +randn(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, \ +pin_memory=False) -> Tensor +""" + + r""" + +Returns a tensor filled with random numbers from a normal distribution +with mean `0` and variance `1` (also called the standard normal +distribution). + +.. math:: + \text{{out}}_{{i}} \sim \mathcal{{N}}(0, 1) + +For complex dtypes, the tensor is i.i.d. sampled from a `complex normal distribution`_ with zero mean and +unit variance as + +.. 
math:: + \text{{out}}_{{i}} \sim \mathcal{{CN}}(0, 1) + +This is equivalent to separately sampling the real :math:`(\operatorname{{Re}})` and imaginary +:math:`(\operatorname{{Im}})` part of :math:`\text{{out}}_i` as + +.. math:: + \operatorname{{Re}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}}),\quad + \operatorname{{Im}}(\text{{out}}_{{i}}) \sim \mathcal{{N}}(0, \frac{{1}}{{2}}) + +The shape of the tensor is defined by the variable argument :attr:`size`. + + +Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + +Keyword args: + {generator} + {out} + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + +Example:: + + >>> torch.randn(4) + tensor([-2.1436, 0.9966, 2.3426, -0.6366]) + >>> torch.randn(2, 3) + tensor([[ 1.5954, 2.8929, -1.0923], + [ 1.1719, -0.4709, -0.1996]]) + +.. _complex normal distribution: https://en.wikipedia.org/wiki/Complex_normal_distribution +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.randn_like, + r""" +randn_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns a tensor with the same size as :attr:`input` that is filled with +random numbers from a normal distribution with mean 0 and variance 1. Please refer to :func:`torch.randn` for the +sampling process of complex dtypes. ``torch.randn_like(input)`` is equivalent to +``torch.randn(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +Args: + {input} + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.randperm, + """ +randperm(n, *, generator=None, out=None, dtype=torch.int64,layout=torch.strided, \ +device=None, requires_grad=False, pin_memory=False) -> Tensor +""" + + r""" +Returns a random permutation of integers from ``0`` to ``n - 1``. + +Args: + n (int): the upper bound (exclusive) + +Keyword args: + {generator} + {out} + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: ``torch.int64``. + {layout} + {device} + {requires_grad} + {pin_memory} + +Example:: + + >>> torch.randperm(4) + tensor([2, 1, 0, 3]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.tensor, + r""" +tensor(data, *, dtype=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + +Constructs a tensor with no autograd history (also known as a "leaf tensor", see :doc:`/notes/autograd`) by copying :attr:`data`. + +.. warning:: + + When working with tensors prefer using :func:`torch.Tensor.clone`, + :func:`torch.Tensor.detach`, and :func:`torch.Tensor.requires_grad_` for + readability. Letting `t` be a tensor, ``torch.tensor(t)`` is equivalent to + ``t.clone().detach()``, and ``torch.tensor(t, requires_grad=True)`` + is equivalent to ``t.clone().detach().requires_grad_(True)``. + +.. seealso:: + + :func:`torch.as_tensor` preserves autograd history and avoids copies where possible. + :func:`torch.from_numpy` creates a tensor that shares storage with a NumPy array. + +Args: + {data} + +Keyword args: + {dtype} + device (:class:`torch.device`, optional): the device of the constructed tensor. If None and data is a tensor + then the device of data is used. If None and data is not a tensor then + the result tensor is constructed on the current device. 
+ {requires_grad} + {pin_memory} + + +Example:: + + >>> torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]) + tensor([[ 0.1000, 1.2000], + [ 2.2000, 3.1000], + [ 4.9000, 5.2000]]) + + >>> torch.tensor([0, 1]) # Type inference on data + tensor([ 0, 1]) + + >>> torch.tensor([[0.11111, 0.222222, 0.3333333]], + ... dtype=torch.float64, + ... device=torch.device('cuda:0')) # creates a double tensor on a CUDA device + tensor([[ 0.1111, 0.2222, 0.3333]], dtype=torch.float64, device='cuda:0') + + >>> torch.tensor(3.14159) # Create a zero-dimensional (scalar) tensor + tensor(3.1416) + + >>> torch.tensor([]) # Create an empty tensor (of size (0,)) + tensor([]) +""".format( + **factory_data_common_args + ), +) + +add_docstr( + torch.range, + r""" +range(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1` +with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is +the gap between two values in the tensor. + +.. math:: + \text{out}_{i+1} = \text{out}_i + \text{step}. +""" + + r""" +.. warning:: + This function is deprecated and will be removed in a future release because its behavior is inconsistent with + Python's range builtin. Instead, use :func:`torch.arange`, which produces values in [start, end). + +Args: + start (float): the starting value for the set of points. Default: ``0``. + end (float): the ending value for the set of points + step (float): the gap between each pair of adjacent points. Default: ``1``. + +Keyword args: + {out} + {dtype} If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.range(1, 4) + tensor([ 1., 2., 3., 4.]) + >>> torch.range(1, 4, 0.5) + tensor([ 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.5000, 4.0000]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.arange, + r""" +arange(start=0, end, step=1, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a 1-D tensor of size :math:`\left\lceil \frac{\text{end} - \text{start}}{\text{step}} \right\rceil` +with values from the interval ``[start, end)`` taken with common difference +:attr:`step` beginning from `start`. + +Note that non-integer :attr:`step` is subject to floating point rounding errors when +comparing against :attr:`end`; to avoid inconsistency, we advise subtracting a small epsilon from :attr:`end` +in such cases. + +.. math:: + \text{out}_{{i+1}} = \text{out}_{i} + \text{step} +""" + + r""" +Args: + start (Number): the starting value for the set of points. Default: ``0``. + end (Number): the ending value for the set of points + step (Number): the gap between each pair of adjacent points. Default: ``1``. + +Keyword args: + {out} + {dtype} If `dtype` is not given, infer the data type from the other input + arguments. If any of `start`, `end`, or `stop` are floating-point, the + `dtype` is inferred to be the default dtype, see + :meth:`~torch.get_default_dtype`. Otherwise, the `dtype` is inferred to + be `torch.int64`. 
+ {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.arange(5) + tensor([ 0, 1, 2, 3, 4]) + >>> torch.arange(1, 4) + tensor([ 1, 2, 3]) + >>> torch.arange(1, 2.5, 0.5) + tensor([ 1.0000, 1.5000, 2.0000]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.ravel, + r""" +ravel(input) -> Tensor + +Return a contiguous flattened tensor. A copy is made only if needed. + +Args: + {input} + +Example:: + + >>> t = torch.tensor([[[1, 2], + ... [3, 4]], + ... [[5, 6], + ... [7, 8]]]) + >>> torch.ravel(t) + tensor([1, 2, 3, 4, 5, 6, 7, 8]) +""".format( + **common_args + ), +) + +add_docstr( + torch.remainder, + r""" +remainder(input, other, *, out=None) -> Tensor + +Computes +`Python's modulus operation `_ +entrywise. The result has the same sign as the divisor :attr:`other` and its absolute value +is less than that of :attr:`other`. + +It may also be defined in terms of :func:`torch.div` as + +.. code:: python + + torch.remainder(a, b) == a - a.div(b, rounding_mode="floor") * b + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer and float inputs. + +.. note:: + Complex inputs are not supported. In some cases, it is not mathematically + possible to satisfy the definition of a modulo operation with complex numbers. + See :func:`torch.fmod` for how division by zero is handled. + +.. seealso:: + + :func:`torch.fmod` which implements C++'s `std::fmod `_. + This one is defined in terms of division rounding towards zero. + +Args: + input (Tensor or Scalar): the dividend + other (Tensor or Scalar): the divisor + +Keyword args: + {out} + +Example:: + + >>> torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2) + tensor([ 1., 0., 1., 1., 0., 1.]) + >>> torch.remainder(torch.tensor([1, 2, 3, 4, 5]), -1.5) + tensor([ -0.5000, -1.0000, 0.0000, -0.5000, -1.0000 ]) +""".format( + **common_args + ), +) + +add_docstr( + torch.renorm, + r""" +renorm(input, p, dim, maxnorm, *, out=None) -> Tensor + +Returns a tensor where each sub-tensor of :attr:`input` along dimension +:attr:`dim` is normalized such that the `p`-norm of the sub-tensor is lower +than the value :attr:`maxnorm` + +.. note:: If the norm of a row is lower than `maxnorm`, the row is unchanged + +Args: + {input} + p (float): the power for the norm computation + dim (int): the dimension to slice over to get the sub-tensors + maxnorm (float): the maximum norm to keep each sub-tensor under + +Keyword args: + {out} + +Example:: + + >>> x = torch.ones(3, 3) + >>> x[1].fill_(2) + tensor([ 2., 2., 2.]) + >>> x[2].fill_(3) + tensor([ 3., 3., 3.]) + >>> x + tensor([[ 1., 1., 1.], + [ 2., 2., 2.], + [ 3., 3., 3.]]) + >>> torch.renorm(x, 1, 0, 5) + tensor([[ 1.0000, 1.0000, 1.0000], + [ 1.6667, 1.6667, 1.6667], + [ 1.6667, 1.6667, 1.6667]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.reshape, + r""" +reshape(input, shape) -> Tensor + +Returns a tensor with the same data and number of elements as :attr:`input`, +but with the specified shape. When possible, the returned tensor will be a view +of :attr:`input`. Otherwise, it will be a copy. Contiguous inputs and inputs +with compatible strides can be reshaped without copying, but you should not +depend on the copying vs. viewing behavior. + +See :meth:`torch.Tensor.view` on when it is possible to return a view. + +A single dimension may be -1, in which case it's inferred from the remaining +dimensions and the number of elements in :attr:`input`. 
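+
+For example, the following sketch (an informal check, not an API guarantee)
+shows the inferred dimension and one way to tell whether a view or a copy was
+returned, by comparing the underlying data pointers:
+
+.. code:: python
+
+    import torch
+
+    a = torch.arange(6.)
+    b = torch.reshape(a, (2, -1))  # the -1 is inferred to be 3
+    # `a` is contiguous, so `b` is a view sharing the same storage
+    assert a.data_ptr() == b.data_ptr()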
+ +Args: + input (Tensor): the tensor to be reshaped + shape (tuple of int): the new shape + +Example:: + + >>> a = torch.arange(4.) + >>> torch.reshape(a, (2, 2)) + tensor([[ 0., 1.], + [ 2., 3.]]) + >>> b = torch.tensor([[0, 1], [2, 3]]) + >>> torch.reshape(b, (-1,)) + tensor([ 0, 1, 2, 3]) +""", +) + + +add_docstr( + torch.result_type, + r""" +result_type(tensor1, tensor2) -> dtype + +Returns the :class:`torch.dtype` that would result from performing an arithmetic +operation on the provided input tensors. See type promotion :ref:`documentation ` +for more information on the type promotion logic. + +Args: + tensor1 (Tensor or Number): an input tensor or number + tensor2 (Tensor or Number): an input tensor or number + +Example:: + + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.int), 1.0) + torch.float32 + >>> torch.result_type(torch.tensor([1, 2], dtype=torch.uint8), torch.tensor(1)) + torch.uint8 +""", +) + +add_docstr( + torch.row_stack, + r""" +row_stack(tensors, *, out=None) -> Tensor + +Alias of :func:`torch.vstack`. +""", +) + +add_docstr( + torch.round, + r""" +round(input, *, decimals=0, out=None) -> Tensor + +Rounds elements of :attr:`input` to the nearest integer. + +For integer inputs, follows the array-api convention of returning a +copy of the input tensor. +The return type of output is same as that of input's dtype. + +.. note:: + This function implements the "round half to even" to + break ties when a number is equidistant from two + integers (e.g. `round(2.5)` is 2). + + When the :attr:\`decimals\` argument is specified the + algorithm used is similar to NumPy's `around`. This + algorithm is fast but inexact and it can easily + overflow for low precision dtypes. + Eg. `round(tensor([10000], dtype=torch.float16), decimals=3)` is `inf`. + +.. seealso:: + :func:`torch.ceil`, which rounds up. + :func:`torch.floor`, which rounds down. + :func:`torch.trunc`, which rounds towards zero. + +Args: + {input} + decimals (int): Number of decimal places to round to (default: 0). + If decimals is negative, it specifies the number of positions + to the left of the decimal point. + +Keyword args: + {out} + +Example:: + + >>> torch.round(torch.tensor((4.7, -2.3, 9.1, -7.7))) + tensor([ 5., -2., 9., -8.]) + + >>> # Values equidistant from two integers are rounded towards the + >>> # the nearest even value (zero is treated as even) + >>> torch.round(torch.tensor([-0.5, 0.5, 1.5, 2.5])) + tensor([-0., 0., 2., 2.]) + + >>> # A positive decimals argument rounds to the to that decimal place + >>> torch.round(torch.tensor([0.1234567]), decimals=3) + tensor([0.1230]) + + >>> # A negative decimals argument rounds to the left of the decimal + >>> torch.round(torch.tensor([1200.1234567]), decimals=-3) + tensor([1000.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.rsqrt, + r""" +rsqrt(input, *, out=None) -> Tensor + +Returns a new tensor with the reciprocal of the square-root of each of +the elements of :attr:`input`. + +.. 
math:: + \text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}} +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.0370, 0.2970, 1.5420, -0.9105]) + >>> torch.rsqrt(a) + tensor([ nan, 1.8351, 0.8053, nan]) +""".format( + **common_args + ), +) + +add_docstr( + torch.scatter, + r""" +scatter(input, dim, index, src) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.scatter_` +""", +) + +add_docstr( + torch.scatter_add, + r""" +scatter_add(input, dim, index, src) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.scatter_add_` +""", +) + +add_docstr( + torch.scatter_reduce, + r""" +scatter_reduce(input, dim, index, src, reduce, *, include_self=True) -> Tensor + +Out-of-place version of :meth:`torch.Tensor.scatter_reduce_` +""", +) + +add_docstr( + torch.select, + r""" +select(input, dim, index) -> Tensor + +Slices the :attr:`input` tensor along the selected dimension at the given index. +This function returns a view of the original tensor with the given dimension removed. + +.. note:: If :attr:`input` is a sparse tensor and returning a view of + the tensor is not possible, a RuntimeError exception is + raised. In this is the case, consider using + :func:`torch.select_copy` function. + +Args: + {input} + dim (int): the dimension to slice + index (int): the index to select with + +.. note:: + + :meth:`select` is equivalent to slicing. For example, + ``tensor.select(0, index)`` is equivalent to ``tensor[index]`` and + ``tensor.select(2, index)`` is equivalent to ``tensor[:,:,index]``. +""".format( + **common_args + ), +) + +add_docstr( + torch.select_scatter, + r""" +select_scatter(input, src, dim, index) -> Tensor + +Embeds the values of the :attr:`src` tensor into :attr:`input` at the given index. +This function returns a tensor with fresh storage; it does not create a view. + + +Args: + {input} + src (Tensor): The tensor to embed into :attr:`input` + dim (int): the dimension to insert the slice into. + index (int): the index to select with + +.. note:: + + :attr:`src` must be of the proper size in order to be embedded + into :attr:`input`. Specifically, it should have the same shape as + ``torch.select(input, dim, index)`` + +Example:: + + >>> a = torch.zeros(2, 2) + >>> b = torch.ones(2) + >>> a.select_scatter(b, 0, 0) + tensor([[1., 1.], + [0., 0.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.slice_scatter, + r""" +slice_scatter(input, src, dim=0, start=None, end=None, step=1) -> Tensor + +Embeds the values of the :attr:`src` tensor into :attr:`input` at the given +dimension. +This function returns a tensor with fresh storage; it does not create a view. 
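+
+Conceptually (a sketch of the semantics, not of the actual implementation),
+the result is the same as cloning :attr:`input` and assigning :attr:`src` into
+the selected slice along :attr:`dim`:
+
+.. code:: python
+
+    import torch
+
+    a = torch.zeros(8, 8)
+    b = torch.ones(8, 2)
+    out = a.slice_scatter(b, dim=1, start=2, end=6, step=2)
+
+    ref = a.clone()
+    ref[:, 2:6:2] = b  # columns 2 and 4
+    assert torch.equal(out, ref)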
+ + +Args: + {input} + src (Tensor): The tensor to embed into :attr:`input` + dim (int): the dimension to insert the slice into + start (Optional[int]): the start index of where to insert the slice + end (Optional[int]): the end index of where to insert the slice + step (int): the how many elements to skip in + +Example:: + + >>> a = torch.zeros(8, 8) + >>> b = torch.ones(2, 8) + >>> a.slice_scatter(b, start=6) + tensor([[0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., 0., 0., 0.], + [1., 1., 1., 1., 1., 1., 1., 1.], + [1., 1., 1., 1., 1., 1., 1., 1.]]) + + >>> b = torch.ones(8, 2) + >>> a.slice_scatter(b, dim=1, start=2, end=6, step=2) + tensor([[0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.], + [0., 0., 1., 0., 1., 0., 0., 0.]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.set_flush_denormal, + r""" +set_flush_denormal(mode) -> bool + +Disables denormal floating numbers on CPU. + +Returns ``True`` if your system supports flushing denormal numbers and it +successfully configures flush denormal mode. :meth:`~torch.set_flush_denormal` +is supported on x86 architectures supporting SSE3 and AArch64 architecture. + +Args: + mode (bool): Controls whether to enable flush denormal mode or not + +Example:: + + >>> torch.set_flush_denormal(True) + True + >>> torch.tensor([1e-323], dtype=torch.float64) + tensor([ 0.], dtype=torch.float64) + >>> torch.set_flush_denormal(False) + True + >>> torch.tensor([1e-323], dtype=torch.float64) + tensor(9.88131e-324 * + [ 1.0000], dtype=torch.float64) +""", +) + +add_docstr( + torch.set_num_threads, + r""" +set_num_threads(int) + +Sets the number of threads used for intraop parallelism on CPU. + +.. warning:: + To ensure that the correct number of threads is used, set_num_threads + must be called before running eager, JIT or autograd code. +""", +) + +add_docstr( + torch.set_num_interop_threads, + r""" +set_num_interop_threads(int) + +Sets the number of threads used for interop parallelism +(e.g. in JIT interpreter) on CPU. + +.. warning:: + Can only be called once and before any inter-op parallel work + is started (e.g. JIT execution). +""", +) + +add_docstr( + torch.sigmoid, + r""" +sigmoid(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.expit`. +""", +) + +add_docstr( + torch.logit, + r""" +logit(input, eps=None, *, out=None) -> Tensor + +Alias for :func:`torch.special.logit`. +""", +) + +add_docstr( + torch.sign, + r""" +sign(input, *, out=None) -> Tensor + +Returns a new tensor with the signs of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \operatorname{sgn}(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([0.7, -1.2, 0., 2.3]) + >>> a + tensor([ 0.7000, -1.2000, 0.0000, 2.3000]) + >>> torch.sign(a) + tensor([ 1., -1., 0., 1.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.signbit, + r""" +signbit(input, *, out=None) -> Tensor + +Tests if each element of :attr:`input` has its sign bit set or not. 
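+
+This differs from an elementwise ``input < 0`` comparison for signed zeros:
+``-0.0`` compares equal to ``0.0`` but has its sign bit set. An illustrative
+sketch:
+
+.. code:: python
+
+    import torch
+
+    a = torch.tensor([-0.0, 0.0])
+    torch.signbit(a)  # tensor([ True, False])
+    a < 0             # tensor([False, False])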
+ +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.tensor([0.7, -1.2, 0., 2.3]) + >>> torch.signbit(a) + tensor([ False, True, False, False]) + >>> a = torch.tensor([-0.0, 0.0]) + >>> torch.signbit(a) + tensor([ True, False]) + +.. note:: + signbit handles signed zeros, so negative zero (-0) returns True. + +""".format( + **common_args + ), +) + +add_docstr( + torch.sgn, + r""" +sgn(input, *, out=None) -> Tensor + +This function is an extension of torch.sign() to complex tensors. +It computes a new tensor whose elements have +the same angles as the corresponding elements of :attr:`input` and +absolute values (i.e. magnitudes) of one for complex tensors and +is equivalent to torch.sign() for non-complex tensors. + +.. math:: + \text{out}_{i} = \begin{cases} + 0 & |\text{{input}}_i| == 0 \\ + \frac{{\text{{input}}_i}}{|{\text{{input}}_i}|} & \text{otherwise} + \end{cases} + +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> t = torch.tensor([3+4j, 7-24j, 0, 1+2j]) + >>> t.sgn() + tensor([0.6000+0.8000j, 0.2800-0.9600j, 0.0000+0.0000j, 0.4472+0.8944j]) +""".format( + **common_args + ), +) + +add_docstr( + torch.sin, + r""" +sin(input, *, out=None) -> Tensor + +Returns a new tensor with the sine of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \sin(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-0.5461, 0.1347, -2.7266, -0.2746]) + >>> torch.sin(a) + tensor([-0.5194, 0.1343, -0.4032, -0.2711]) +""".format( + **common_args + ), +) + +add_docstr( + torch.sinc, + r""" +sinc(input, *, out=None) -> Tensor + +Alias for :func:`torch.special.sinc`. +""", +) + +add_docstr( + torch.sinh, + r""" +sinh(input, *, out=None) -> Tensor + +Returns a new tensor with the hyperbolic sine of the elements of +:attr:`input`. + +.. math:: + \text{out}_{i} = \sinh(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.5380, -0.8632, -0.1265, 0.9399]) + >>> torch.sinh(a) + tensor([ 0.5644, -0.9744, -0.1268, 1.0845]) + +.. note:: + When :attr:`input` is on the CPU, the implementation of torch.sinh may use + the Sleef library, which rounds very large results to infinity or negative + infinity. See `here `_ for details. +""".format( + **common_args + ), +) + +add_docstr( + torch.sort, + r""" +sort(input, dim=-1, descending=False, stable=False, *, out=None) -> (Tensor, LongTensor) + +Sorts the elements of the :attr:`input` tensor along a given dimension +in ascending order by value. + +If :attr:`dim` is not given, the last dimension of the `input` is chosen. + +If :attr:`descending` is ``True`` then the elements are sorted in descending +order by value. + +If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving +the order of equivalent elements. + +A namedtuple of (values, indices) is returned, where the `values` are the +sorted values and `indices` are the indices of the elements in the original +`input` tensor. + +Args: + {input} + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): makes the sorting routine stable, which guarantees that the order + of equivalent elements is preserved. 
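+
+The returned ``indices`` can also be used to apply the same ordering to another
+tensor of matching shape via :func:`torch.gather`, for example (an illustrative
+sketch):
+
+.. code:: python
+
+    import torch
+
+    keys = torch.tensor([[3., 1., 2.]])
+    vals = torch.tensor([[30., 10., 20.]])
+    sorted_keys, idx = torch.sort(keys, dim=1)
+    sorted_vals = torch.gather(vals, 1, idx)  # tensor([[10., 20., 30.]])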
+ +Keyword args: + out (tuple, optional): the output tuple of (`Tensor`, `LongTensor`) that can + be optionally given to be used as output buffers + +Example:: + + >>> x = torch.randn(3, 4) + >>> sorted, indices = torch.sort(x) + >>> sorted + tensor([[-0.2162, 0.0608, 0.6719, 2.3332], + [-0.5793, 0.0061, 0.6058, 0.9497], + [-0.5071, 0.3343, 0.9553, 1.0960]]) + >>> indices + tensor([[ 1, 0, 2, 3], + [ 3, 1, 0, 2], + [ 0, 3, 1, 2]]) + + >>> sorted, indices = torch.sort(x, 0) + >>> sorted + tensor([[-0.5071, -0.2162, 0.6719, -0.5793], + [ 0.0608, 0.0061, 0.9497, 0.3343], + [ 0.6058, 0.9553, 1.0960, 2.3332]]) + >>> indices + tensor([[ 2, 0, 0, 1], + [ 0, 1, 1, 2], + [ 1, 2, 2, 0]]) + >>> x = torch.tensor([0, 1] * 9) + >>> x.sort() + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 2, 16, 4, 6, 14, 8, 0, 10, 12, 9, 17, 15, 13, 11, 7, 5, 3, 1])) + >>> x.sort(stable=True) + torch.return_types.sort( + values=tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]), + indices=tensor([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 1, 3, 5, 7, 9, 11, 13, 15, 17])) +""".format( + **common_args + ), +) + +add_docstr( + torch.argsort, + r""" +argsort(input, dim=-1, descending=False, stable=False) -> Tensor + +Returns the indices that sort a tensor along a given dimension in ascending +order by value. + +This is the second value returned by :meth:`torch.sort`. See its documentation +for the exact semantics of this method. + +If :attr:`stable` is ``True`` then the sorting routine becomes stable, preserving +the order of equivalent elements. If ``False``, the relative order of values +which compare equal is not guaranteed. ``True`` is slower. + +Args: + {input} + dim (int, optional): the dimension to sort along + descending (bool, optional): controls the sorting order (ascending or descending) + stable (bool, optional): controls the relative order of equivalent elements + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0785, 1.5267, -0.8521, 0.4065], + [ 0.1598, 0.0788, -0.0745, -1.2700], + [ 1.2208, 1.0722, -0.7064, 1.2564], + [ 0.0669, -0.2318, -0.8229, -0.9280]]) + + + >>> torch.argsort(a, dim=1) + tensor([[2, 0, 3, 1], + [3, 2, 1, 0], + [2, 1, 0, 3], + [3, 2, 1, 0]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.msort, + r""" +msort(input, *, out=None) -> Tensor + +Sorts the elements of the :attr:`input` tensor along its first dimension +in ascending order by value. + +.. note:: `torch.msort(t)` is equivalent to `torch.sort(t, dim=0)[0]`. + See also :func:`torch.sort`. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> t = torch.randn(3, 4) + >>> t + tensor([[-0.1321, 0.4370, -1.2631, -1.1289], + [-2.0527, -1.1250, 0.2275, 0.3077], + [-0.0881, -0.1259, -0.5495, 1.0284]]) + >>> torch.msort(t) + tensor([[-2.0527, -1.1250, -1.2631, -1.1289], + [-0.1321, -0.1259, -0.5495, 0.3077], + [-0.0881, 0.4370, 0.2275, 1.0284]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.sparse_compressed_tensor, + r"""sparse_compressed_tensor(compressed_indices, plain_indices, values, size=None, """ + r"""*, dtype=None, layout=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in Compressed Sparse format - CSR, +CSC, BSR, or BSC - ` with specified values at +the given :attr:`compressed_indices` and :attr:`plain_indices`. Sparse +matrix multiplication operations in Compressed Sparse format are +typically faster than that for sparse tensors in COO format. 
Make you +have a look at :ref:`the note on the data type of the indices +`. + +{sparse_factory_device_note} + +Args: + compressed_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, compressed_dim_size + 1)``. The last element of + each batch is the number of non-zero elements or blocks. This + tensor encodes the index in ``values`` and ``plain_indices`` + depending on where the given compressed dimension (row or + column) starts. Each successive number in the tensor + subtracted by the number before it denotes the number of + elements or blocks in a given compressed dimension. + plain_indices (array_like): Plain dimension (column or row) + co-ordinates of each element or block in values. (B+1)-dimensional + tensor with the same length as values. + + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types. that + represents a (1+K)-dimensional (for CSR and CSC layouts) or + (1+2+K)-dimensional tensor (for BSR and BSC layouts) where + ``K`` is the number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` where ``blocksize[0] == + blocksize[1] == 1`` for CSR and CSC formats. If not provided, + the size will be inferred as the minimum size big enough to + hold all non-zero elements or blocks. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + layout (:class:`torch.layout`, required): the desired layout of + returned tensor: :attr:`torch.sparse_csr`, + :attr:`torch.sparse_csc`, :attr:`torch.sparse_bsr`, or + :attr:`torch.sparse_bsc`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> compressed_indices = [0, 2, 4] + >>> plain_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_compressed_tensor(torch.tensor(compressed_indices, dtype=torch.int64), + ... torch.tensor(plain_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double, layout=torch.sparse_csr) + tensor(crow_indices=tensor([0, 2, 4]), + col_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csr) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_csr_tensor, + r"""sparse_csr_tensor(crow_indices, col_indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in CSR (Compressed Sparse Row) ` with specified +values at the given :attr:`crow_indices` and :attr:`col_indices`. Sparse matrix multiplication operations +in CSR format are typically faster than that for sparse tensors in COO format. Make you have a look +at :ref:`the note on the data type of the indices `. + +{sparse_factory_device_note} + +Args: + crow_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, nrows + 1)``. The last element of each batch + is the number of non-zeros. This tensor encodes the index in + values and col_indices depending on where the given row + starts. 
Each successive number in the tensor subtracted by the + number before it denotes the number of elements in a given + row. + col_indices (array_like): Column co-ordinates of each element in + values. (B+1)-dimensional tensor with the same length + as values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1+K)-dimensional tensor where ``K`` is the number + of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If + not provided, the size will be inferred as the minimum size + big enough to hold all non-zero elements. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> crow_indices = [0, 2, 4] + >>> col_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_csr_tensor(torch.tensor(crow_indices, dtype=torch.int64), + ... torch.tensor(col_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(crow_indices=tensor([0, 2, 4]), + col_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csr) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_csc_tensor, + r"""sparse_csc_tensor(ccol_indices, row_indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in CSC (Compressed Sparse Column) +` with specified values at the given +:attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix +multiplication operations in CSC format are typically faster than that +for sparse tensors in COO format. Make you have a look at :ref:`the +note on the data type of the indices `. + +{sparse_factory_device_note} + +Args: + ccol_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, ncols + 1)``. The last element of each batch + is the number of non-zeros. This tensor encodes the index in + values and row_indices depending on where the given column + starts. Each successive number in the tensor subtracted by the + number before it denotes the number of elements in a given + column. + row_indices (array_like): Row co-ordinates of each element in + values. (B+1)-dimensional tensor with the same length as + values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1+K)-dimensional tensor where ``K`` is the number + of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows, ncols, *densesize)``. If + not provided, the size will be inferred as the minimum size + big enough to hold all non-zero elements. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. 
Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> ccol_indices = [0, 2, 4] + >>> row_indices = [0, 1, 0, 1] + >>> values = [1, 2, 3, 4] + >>> torch.sparse_csc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), + ... torch.tensor(row_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(ccol_indices=tensor([0, 2, 4]), + row_indices=tensor([0, 1, 0, 1]), + values=tensor([1., 2., 3., 4.]), size=(2, 2), nnz=4, + dtype=torch.float64, layout=torch.sparse_csc) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_bsr_tensor, + r"""sparse_bsr_tensor(crow_indices, col_indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in BSR (Block Compressed Sparse Row)) +` with specified 2-dimensional blocks at the given +:attr:`crow_indices` and :attr:`col_indices`. Sparse matrix +multiplication operations in BSR format are typically faster than that +for sparse tensors in COO format. Make you have a look at :ref:`the +note on the data type of the indices `. + +{sparse_factory_device_note} + +Args: + crow_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, nrowblocks + 1)``. The last element of each + batch is the number of non-zeros. This tensor encodes the + block index in values and col_indices depending on where the + given row block starts. Each successive number in the tensor + subtracted by the number before it denotes the number of + blocks in a given row. + col_indices (array_like): Column block co-ordinates of each block + in values. (B+1)-dimensional tensor with the same length as + values. + values (array_list): Initial values for the tensor. Can be a list, + tuple, NumPy ``ndarray``, scalar, and other types that + represents a (1 + 2 + K)-dimensional tensor where ``K`` is the + number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` where ``blocksize == + values.shape[1:3]``. If not provided, the size will be + inferred as the minimum size big enough to hold all non-zero + blocks. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> crow_indices = [0, 1, 2] + >>> col_indices = [0, 1] + >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> torch.sparse_bsr_tensor(torch.tensor(crow_indices, dtype=torch.int64), + ... torch.tensor(col_indices, dtype=torch.int64), + ... 
torch.tensor(values), dtype=torch.double) + tensor(crow_indices=tensor([0, 1, 2]), + col_indices=tensor([0, 1]), + values=tensor([[[1., 2.], + [3., 4.]], + [[5., 6.], + [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, + layout=torch.sparse_bsr) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_bsc_tensor, + r"""sparse_bsc_tensor(ccol_indices, row_indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None) -> Tensor + +Constructs a :ref:`sparse tensor in BSC (Block Compressed Sparse +Column)) ` with specified 2-dimensional blocks at the +given :attr:`ccol_indices` and :attr:`row_indices`. Sparse matrix +multiplication operations in BSC format are typically faster than that +for sparse tensors in COO format. Make you have a look at :ref:`the +note on the data type of the indices `. + +{sparse_factory_device_note} + +Args: + ccol_indices (array_like): (B+1)-dimensional array of size + ``(*batchsize, ncolblocks + 1)``. The last element of each + batch is the number of non-zeros. This tensor encodes the + index in values and row_indices depending on where the given + column starts. Each successive number in the tensor subtracted + by the number before it denotes the number of elements in a + given column. + row_indices (array_like): Row block co-ordinates of each block in + values. (B+1)-dimensional tensor with the same length + as values. + values (array_list): Initial blocks for the tensor. Can be a list, + tuple, NumPy ``ndarray``, and other types that + represents a (1 + 2 + K)-dimensional tensor where ``K`` is the + number of dense dimensions. + size (list, tuple, :class:`torch.Size`, optional): Size of the + sparse tensor: ``(*batchsize, nrows * blocksize[0], ncols * + blocksize[1], *densesize)`` If not provided, the size will be + inferred as the minimum size big enough to hold all non-zero + blocks. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. Default: if None, infers data type from + :attr:`values`. + device (:class:`torch.device`, optional): the desired device of + returned tensor. Default: if None, uses the current device + for the default tensor type (see + :func:`torch.set_default_device`). :attr:`device` will be + the CPU for CPU tensor types and the current CUDA device for + CUDA tensor types. + {requires_grad} + {check_invariants} + +Example:: + >>> ccol_indices = [0, 1, 2] + >>> row_indices = [0, 1] + >>> values = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] + >>> torch.sparse_bsc_tensor(torch.tensor(ccol_indices, dtype=torch.int64), + ... torch.tensor(row_indices, dtype=torch.int64), + ... torch.tensor(values), dtype=torch.double) + tensor(ccol_indices=tensor([0, 1, 2]), + row_indices=tensor([0, 1]), + values=tensor([[[1., 2.], + [3., 4.]], + [[5., 6.], + [7., 8.]]]), size=(2, 2), nnz=2, dtype=torch.float64, + layout=torch.sparse_bsc) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sparse_coo_tensor, + r"""sparse_coo_tensor(indices, values, size=None, """ + r"""*, dtype=None, device=None, requires_grad=False, check_invariants=None, is_coalesced=None) -> Tensor + +Constructs a :ref:`sparse tensor in COO(rdinate) format +` with specified values at the given +:attr:`indices`. + +.. note:: + + This function returns an :ref:`uncoalesced tensor + ` when :attr:`is_coalesced` is + unspecified or ``None``. + +{sparse_factory_device_note} + +Args: + indices (array_like): Initial data for the tensor. 
Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. Will be cast to a :class:`torch.LongTensor` + internally. The indices are the coordinates of the non-zero values in the matrix, and thus + should be two-dimensional where the first dimension is the number of tensor dimensions and + the second dimension is the number of non-zero values. + values (array_like): Initial values for the tensor. Can be a list, tuple, + NumPy ``ndarray``, scalar, and other types. + size (list, tuple, or :class:`torch.Size`, optional): Size of the sparse tensor. If not + provided the size will be inferred as the minimum size big enough to hold all non-zero + elements. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if None, infers data type from :attr:`values`. + device (:class:`torch.device`, optional): the desired device of returned tensor. + Default: if None, uses the current device for the default tensor type + (see :func:`torch.set_default_device`). :attr:`device` will be the CPU + for CPU tensor types and the current CUDA device for CUDA tensor types. + {requires_grad} + {check_invariants} + is_coalesced (bool, optional): When``True``, the caller is + responsible for providing tensor indices that correspond to a + coalesced tensor. If the :attr:`check_invariants` flag is + False, no error will be raised if the prerequisites are not + met and this will lead to silently incorrect results. To force + coalescion please use :meth:`coalesce` on the resulting + Tensor. + Default: None: except for trivial cases (e.g. nnz < 2) the + resulting Tensor has is_coalesced set to ``False```. + +Example:: + + >>> i = torch.tensor([[0, 1, 1], + ... [2, 0, 2]]) + >>> v = torch.tensor([3, 4, 5], dtype=torch.float32) + >>> torch.sparse_coo_tensor(i, v, [2, 4]) + tensor(indices=tensor([[0, 1, 1], + [2, 0, 2]]), + values=tensor([3., 4., 5.]), + size=(2, 4), nnz=3, layout=torch.sparse_coo) + + >>> torch.sparse_coo_tensor(i, v) # Shape inference + tensor(indices=tensor([[0, 1, 1], + [2, 0, 2]]), + values=tensor([3., 4., 5.]), + size=(2, 3), nnz=3, layout=torch.sparse_coo) + + >>> torch.sparse_coo_tensor(i, v, [2, 4], + ... dtype=torch.float64, + ... device=torch.device('cuda:0')) + tensor(indices=tensor([[0, 1, 1], + [2, 0, 2]]), + values=tensor([3., 4., 5.]), + device='cuda:0', size=(2, 4), nnz=3, dtype=torch.float64, + layout=torch.sparse_coo) + + # Create an empty sparse tensor with the following invariants: + # 1. sparse_dim + dense_dim = len(SparseTensor.shape) + # 2. SparseTensor._indices().shape = (sparse_dim, nnz) + # 3. SparseTensor._values().shape = (nnz, SparseTensor.shape[sparse_dim:]) + # + # For instance, to create an empty sparse tensor with nnz = 0, dense_dim = 0 and + # sparse_dim = 1 (hence indices is a 2D tensor of shape = (1, 0)) + >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1]) + tensor(indices=tensor([], size=(1, 0)), + values=tensor([], size=(0,)), + size=(1,), nnz=0, layout=torch.sparse_coo) + + # and to create an empty sparse tensor with nnz = 0, dense_dim = 1 and + # sparse_dim = 1 + >>> S = torch.sparse_coo_tensor(torch.empty([1, 0]), torch.empty([0, 2]), [1, 2]) + tensor(indices=tensor([], size=(1, 0)), + values=tensor([], size=(0, 2)), + size=(1, 2), nnz=0, layout=torch.sparse_coo) + +.. 
_torch.sparse: https://pytorch.org/docs/stable/sparse.html +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.sqrt, + r""" +sqrt(input, *, out=None) -> Tensor + +Returns a new tensor with the square-root of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \sqrt{\text{input}_{i}} +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-2.0755, 1.0226, 0.0831, 0.4806]) + >>> torch.sqrt(a) + tensor([ nan, 1.0112, 0.2883, 0.6933]) +""".format( + **common_args + ), +) + +add_docstr( + torch.square, + r""" +square(input, *, out=None) -> Tensor + +Returns a new tensor with the square of the elements of :attr:`input`. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-2.0755, 1.0226, 0.0831, 0.4806]) + >>> torch.square(a) + tensor([ 4.3077, 1.0457, 0.0069, 0.2310]) +""".format( + **common_args + ), +) + +add_docstr( + torch.squeeze, + r""" +squeeze(input, dim=None) -> Tensor + +Returns a tensor with all specified dimensions of :attr:`input` of size `1` removed. + +For example, if `input` is of shape: +:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `input.squeeze()` +will be of shape: :math:`(A \times B \times C \times D)`. + +When :attr:`dim` is given, a squeeze operation is done only in the given +dimension(s). If `input` is of shape: :math:`(A \times 1 \times B)`, +``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)`` +will squeeze the tensor to the shape :math:`(A \times B)`. + +.. note:: The returned tensor shares the storage with the input tensor, + so changing the contents of one will change the contents of the other. + +.. warning:: If the tensor has a batch dimension of size 1, then `squeeze(input)` + will also remove the batch dimension, which can lead to unexpected + errors. Consider specifying only the dims you wish to be squeezed. + +Args: + {input} + dim (int or tuple of ints, optional): if given, the input will be squeezed + only in the specified dimensions. + + .. versionchanged:: 2.0 + :attr:`dim` now accepts tuples of dimensions. + +Example:: + + >>> x = torch.zeros(2, 1, 2, 1, 2) + >>> x.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x) + >>> y.size() + torch.Size([2, 2, 2]) + >>> y = torch.squeeze(x, 0) + >>> y.size() + torch.Size([2, 1, 2, 1, 2]) + >>> y = torch.squeeze(x, 1) + >>> y.size() + torch.Size([2, 2, 1, 2]) + >>> y = torch.squeeze(x, (1, 2, 3)) + torch.Size([2, 2, 2]) +""".format( + **common_args + ), +) + +add_docstr( + torch.std, + r""" +std(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + +Calculates the standard deviation over the dimensions specified by :attr:`dim`. +:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to +reduce over all dimensions. + +The standard deviation (:math:`\sigma`) is calculated as + +.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + +where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +sample mean, :math:`N` is the number of samples and :math:`\delta N` is +the :attr:`correction`. +""" + + r""" + +{keepdim_details} + +Args: + {input} + {dim} + +Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. 
versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + {keepdim} + {out} + +Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std(a, dim=1, keepdim=True) + tensor([[1.0311], + [0.7477], + [1.2204], + [0.9087]]) + +.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.std_mean, + r""" +std_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + +Calculates the standard deviation and mean over the dimensions specified by +:attr:`dim`. :attr:`dim` can be a single dimension, list of dimensions, or +``None`` to reduce over all dimensions. + +The standard deviation (:math:`\sigma`) is calculated as + +.. math:: \sigma = \sqrt{\frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2} + +where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +sample mean, :math:`N` is the number of samples and :math:`\delta N` is +the :attr:`correction`. + +""" + + r""" + +{keepdim_details} + +Args: + {input} + {opt_dim} + +Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + {keepdim} + {out} + +Returns: + A tuple (std, mean) containing the standard deviation and mean. + +Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.std_mean(a, dim=0, keepdim=True) + (tensor([[1.2620, 1.0028, 1.0957, 0.6038]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + +.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.sub, + r""" +sub(input, other, *, alpha=1, out=None) -> Tensor + +Subtracts :attr:`other`, scaled by :attr:`alpha`, from :attr:`input`. + +.. math:: + \text{{out}}_i = \text{{input}}_i - \text{{alpha}} \times \text{{other}}_i +""" + + r""" + +Supports :ref:`broadcasting to a common shape `, +:ref:`type promotion `, and integer, float, and complex inputs. + +Args: + {input} + other (Tensor or Number): the tensor or number to subtract from :attr:`input`. + +Keyword args: + alpha (Number): the multiplier for :attr:`other`. + {out} + +Example:: + + >>> a = torch.tensor((1, 2)) + >>> b = torch.tensor((0, 1)) + >>> torch.sub(a, b, alpha=2) + tensor([1, 0]) +""".format( + **common_args + ), +) + +add_docstr( + torch.subtract, + r""" +subtract(input, other, *, alpha=1, out=None) -> Tensor + +Alias for :func:`torch.sub`. +""", +) + +add_docstr( + torch.sum, + r""" +sum(input, *, dtype=None) -> Tensor + +Returns the sum of all elements in the :attr:`input` tensor. + +Args: + {input} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(1, 3) + >>> a + tensor([[ 0.1133, -0.9567, 0.2958]]) + >>> torch.sum(a) + tensor(-0.5475) + +.. 
function:: sum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + +Returns the sum of each row of the :attr:`input` tensor in the given +dimension :attr:`dim`. If :attr:`dim` is a list of dimensions, +reduce over all of them. + +{keepdim_details} + +Args: + {input} + {opt_dim} + {keepdim} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.randn(4, 4) + >>> a + tensor([[ 0.0569, -0.2475, 0.0737, -0.3429], + [-0.2993, 0.9138, 0.9337, -1.6864], + [ 0.1132, 0.7892, -0.1003, 0.5688], + [ 0.3637, -0.9906, -0.4752, -1.5197]]) + >>> torch.sum(a, 1) + tensor([-0.4598, -0.1381, 1.3708, -2.6217]) + >>> b = torch.arange(4 * 5 * 6).view(4, 5, 6) + >>> torch.sum(b, (2, 1)) + tensor([ 435., 1335., 2235., 3135.]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.nansum, + r""" +nansum(input, *, dtype=None) -> Tensor + +Returns the sum of all elements, treating Not a Numbers (NaNs) as zero. + +Args: + {input} + +Keyword args: + {dtype} + +Example:: + + >>> a = torch.tensor([1., 2., float('nan'), 4.]) + >>> torch.nansum(a) + tensor(7.) + +.. function:: nansum(input, dim, keepdim=False, *, dtype=None) -> Tensor + :noindex: + +Returns the sum of each row of the :attr:`input` tensor in the given +dimension :attr:`dim`, treating Not a Numbers (NaNs) as zero. +If :attr:`dim` is a list of dimensions, reduce over all of them. + +{keepdim_details} + +Args: + {input} + {opt_dim} + {keepdim} + +Keyword args: + {dtype} + +Example:: + + >>> torch.nansum(torch.tensor([1., float("nan")])) + 1.0 + >>> a = torch.tensor([[1, 2], [3., float("nan")]]) + >>> torch.nansum(a) + tensor(6.) + >>> torch.nansum(a, dim=0) + tensor([4., 2.]) + >>> torch.nansum(a, dim=1) + tensor([3., 3.]) +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.svd, + r""" +svd(input, some=True, compute_uv=True, *, out=None) -> (Tensor, Tensor, Tensor) + +Computes the singular value decomposition of either a matrix or batch of +matrices :attr:`input`. The singular value decomposition is represented as a +namedtuple `(U, S, V)`, such that :attr:`input` :math:`= U \text{diag}(S) V^{\text{H}}`. +where :math:`V^{\text{H}}` is the transpose of `V` for real inputs, +and the conjugate transpose of `V` for complex inputs. +If :attr:`input` is a batch of matrices, then `U`, `S`, and `V` are also +batched with the same batch dimensions as :attr:`input`. + +If :attr:`some` is `True` (default), the method returns the reduced singular +value decomposition. In this case, if the last two dimensions of :attr:`input` are +`m` and `n`, then the returned `U` and `V` matrices will contain only +`min(n, m)` orthonormal columns. + +If :attr:`compute_uv` is `False`, the returned `U` and `V` will be +zero-filled matrices of shape `(m, m)` and `(n, n)` +respectively, and the same device as :attr:`input`. The argument :attr:`some` +has no effect when :attr:`compute_uv` is `False`. + +Supports :attr:`input` of float, double, cfloat and cdouble data types. +The dtypes of `U` and `V` are the same as :attr:`input`'s. `S` will +always be real-valued, even if :attr:`input` is complex. + +.. warning:: + + :func:`torch.svd` is deprecated in favor of :func:`torch.linalg.svd` + and will be removed in a future PyTorch release. + + ``U, S, V = torch.svd(A, some=some, compute_uv=True)`` (default) should be replaced with + + .. code:: python + + U, S, Vh = torch.linalg.svd(A, full_matrices=not some) + V = Vh.mH + + ``_, S, _ = torch.svd(A, some=some, compute_uv=False)`` should be replaced with + + .. 
code:: python + + S = torch.linalg.svdvals(A) + +.. note:: Differences with :func:`torch.linalg.svd`: + + * :attr:`some` is the opposite of + :func:`torch.linalg.svd`'s :attr:`full_matrices`. Note that + default value for both is `True`, so the default behavior is + effectively the opposite. + * :func:`torch.svd` returns `V`, whereas :func:`torch.linalg.svd` returns + `Vh`, that is, :math:`V^{\text{H}}`. + * If :attr:`compute_uv` is `False`, :func:`torch.svd` returns zero-filled + tensors for `U` and `Vh`, whereas :func:`torch.linalg.svd` returns + empty tensors. + +.. note:: The singular values are returned in descending order. If :attr:`input` is a batch of matrices, + then the singular values of each matrix in the batch are returned in descending order. + +.. note:: The `S` tensor can only be used to compute gradients if :attr:`compute_uv` is `True`. + +.. note:: When :attr:`some` is `False`, the gradients on `U[..., :, min(m, n):]` + and `V[..., :, min(m, n):]` will be ignored in the backward pass, as those vectors + can be arbitrary bases of the corresponding subspaces. + +.. note:: The implementation of :func:`torch.linalg.svd` on CPU uses LAPACK's routine `?gesdd` + (a divide-and-conquer algorithm) instead of `?gesvd` for speed. Analogously, + on GPU, it uses cuSOLVER's routines `gesvdj` and `gesvdjBatched` on CUDA 10.1.243 + and later, and MAGMA's routine `gesdd` on earlier versions of CUDA. + +.. note:: The returned `U` will not be contiguous. The matrix (or batch of matrices) will + be represented as a column-major matrix (i.e. Fortran-contiguous). + +.. warning:: The gradients with respect to `U` and `V` will only be finite when the input does not + have zero nor repeated singular values. + +.. warning:: If the distance between any two singular values is close to zero, the gradients with respect to + `U` and `V` will be numerically unstable, as they depends on + :math:`\frac{1}{\min_{i \neq j} \sigma_i^2 - \sigma_j^2}`. The same happens when the matrix + has small singular values, as these gradients also depend on `S^{-1}`. + +.. warning:: For complex-valued :attr:`input` the singular value decomposition is not unique, + as `U` and `V` may be multiplied by an arbitrary phase factor :math:`e^{i \phi}` on every column. + The same happens when :attr:`input` has repeated singular values, where one may multiply + the columns of the spanning subspace in `U` and `V` by a rotation matrix + and `the resulting vectors will span the same subspace`_. + Different platforms, like NumPy, or inputs on different device types, + may produce different `U` and `V` tensors. + +Args: + input (Tensor): the input tensor of size `(*, m, n)` where `*` is zero or more + batch dimensions consisting of `(m, n)` matrices. + some (bool, optional): controls whether to compute the reduced or full decomposition, and + consequently, the shape of returned `U` and `V`. Default: `True`. + compute_uv (bool, optional): controls whether to compute `U` and `V`. Default: `True`. 
+ +Keyword args: + out (tuple, optional): the output tuple of tensors + +Example:: + + >>> a = torch.randn(5, 3) + >>> a + tensor([[ 0.2364, -0.7752, 0.6372], + [ 1.7201, 0.7394, -0.0504], + [-0.3371, -1.0584, 0.5296], + [ 0.3550, -0.4022, 1.5569], + [ 0.2445, -0.0158, 1.1414]]) + >>> u, s, v = torch.svd(a) + >>> u + tensor([[ 0.4027, 0.0287, 0.5434], + [-0.1946, 0.8833, 0.3679], + [ 0.4296, -0.2890, 0.5261], + [ 0.6604, 0.2717, -0.2618], + [ 0.4234, 0.2481, -0.4733]]) + >>> s + tensor([2.3289, 2.0315, 0.7806]) + >>> v + tensor([[-0.0199, 0.8766, 0.4809], + [-0.5080, 0.4054, -0.7600], + [ 0.8611, 0.2594, -0.4373]]) + >>> torch.dist(a, torch.mm(torch.mm(u, torch.diag(s)), v.t())) + tensor(8.6531e-07) + >>> a_big = torch.randn(7, 5, 3) + >>> u, s, v = torch.svd(a_big) + >>> torch.dist(a_big, torch.matmul(torch.matmul(u, torch.diag_embed(s)), v.mT)) + tensor(2.6503e-06) + +.. _the resulting vectors will span the same subspace: + (https://en.wikipedia.org/wiki/Singular_value_decomposition#Singular_values,_singular_vectors,_and_their_relation_to_the_SVD) +""", +) + + +add_docstr( + torch.t, + r""" +t(input) -> Tensor + +Expects :attr:`input` to be <= 2-D tensor and transposes dimensions 0 +and 1. + +0-D and 1-D tensors are returned as is. When input is a 2-D tensor this +is equivalent to ``transpose(input, 0, 1)``. + +Args: + {input} + +Example:: + + >>> x = torch.randn(()) + >>> x + tensor(0.1995) + >>> torch.t(x) + tensor(0.1995) + >>> x = torch.randn(3) + >>> x + tensor([ 2.4320, -0.4608, 0.7702]) + >>> torch.t(x) + tensor([ 2.4320, -0.4608, 0.7702]) + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 0.4875, 0.9158, -0.5872], + [ 0.3938, -0.6929, 0.6932]]) + >>> torch.t(x) + tensor([[ 0.4875, 0.3938], + [ 0.9158, -0.6929], + [-0.5872, 0.6932]]) + +See also :func:`torch.transpose`. +""".format( + **common_args + ), +) + +add_docstr( + torch.flip, + r""" +flip(input, dims) -> Tensor + +Reverse the order of an n-D tensor along given axis in dims. + +.. note:: + `torch.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.flip` is expected to be slower than `np.flip`. + +Args: + {input} + dims (a list or tuple): axis to flip on + +Example:: + + >>> x = torch.arange(8).view(2, 2, 2) + >>> x + tensor([[[ 0, 1], + [ 2, 3]], + + [[ 4, 5], + [ 6, 7]]]) + >>> torch.flip(x, [0, 1]) + tensor([[[ 6, 7], + [ 4, 5]], + + [[ 2, 3], + [ 0, 1]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fliplr, + r""" +fliplr(input) -> Tensor + +Flip tensor in the left/right direction, returning a new tensor. + +Flip the entries in each row in the left/right direction. +Columns are preserved, but appear in a different order than before. + +Note: + Requires the tensor to be at least 2-D. + +.. note:: + `torch.fliplr` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.fliplr`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.fliplr` is expected to be slower than `np.fliplr`. + +Args: + input (Tensor): Must be at least 2-dimensional. + +Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.fliplr(x) + tensor([[1, 0], + [3, 2]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.flipud, + r""" +flipud(input) -> Tensor + +Flip tensor in the up/down direction, returning a new tensor. 
+ +Flip the entries in each column in the up/down direction. +Rows are preserved, but appear in a different order than before. + +Note: + Requires the tensor to be at least 1-D. + +.. note:: + `torch.flipud` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flipud`, + which returns a view in constant time. Since copying a tensor's data is more work than viewing that data, + `torch.flipud` is expected to be slower than `np.flipud`. + +Args: + input (Tensor): Must be at least 1-dimensional. + +Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.flipud(x) + tensor([[2, 3], + [0, 1]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.roll, + r""" +roll(input, shifts, dims=None) -> Tensor + +Roll the tensor :attr:`input` along the given dimension(s). Elements that are +shifted beyond the last position are re-introduced at the first position. If +:attr:`dims` is `None`, the tensor will be flattened before rolling and then +restored to the original shape. + +Args: + {input} + shifts (int or tuple of ints): The number of places by which the elements + of the tensor are shifted. If shifts is a tuple, dims must be a tuple of + the same size, and each dimension will be rolled by the corresponding + value + dims (int or tuple of ints): Axis along which to roll + +Example:: + + >>> x = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2) + >>> x + tensor([[1, 2], + [3, 4], + [5, 6], + [7, 8]]) + >>> torch.roll(x, 1) + tensor([[8, 1], + [2, 3], + [4, 5], + [6, 7]]) + >>> torch.roll(x, 1, 0) + tensor([[7, 8], + [1, 2], + [3, 4], + [5, 6]]) + >>> torch.roll(x, -1, 0) + tensor([[3, 4], + [5, 6], + [7, 8], + [1, 2]]) + >>> torch.roll(x, shifts=(2, 1), dims=(0, 1)) + tensor([[6, 5], + [8, 7], + [2, 1], + [4, 3]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.rot90, + r""" +rot90(input, k=1, dims=[0,1]) -> Tensor + +Rotate an n-D tensor by 90 degrees in the plane specified by dims axis. +Rotation direction is from the first towards the second axis if k > 0, and from the second towards the first for k < 0. + +Args: + {input} + k (int): number of times to rotate. Default value is 1 + dims (a list or tuple): axis to rotate. Default value is [0, 1] + +Example:: + + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.rot90(x, 1, [0, 1]) + tensor([[1, 3], + [0, 2]]) + + >>> x = torch.arange(8).view(2, 2, 2) + >>> x + tensor([[[0, 1], + [2, 3]], + + [[4, 5], + [6, 7]]]) + >>> torch.rot90(x, 1, [1, 2]) + tensor([[[1, 3], + [0, 2]], + + [[5, 7], + [4, 6]]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.take, + r""" +take(input, index) -> Tensor + +Returns a new tensor with the elements of :attr:`input` at the given indices. +The input tensor is treated as if it were viewed as a 1-D tensor. The result +takes the same shape as the indices. + +Args: + {input} + index (LongTensor): the indices into tensor + +Example:: + + >>> src = torch.tensor([[4, 3, 5], + ... [6, 7, 8]]) + >>> torch.take(src, torch.tensor([0, 2, 5])) + tensor([ 4, 5, 8]) +""".format( + **common_args + ), +) + +add_docstr( + torch.take_along_dim, + r""" +take_along_dim(input, indices, dim=None, *, out=None) -> Tensor + +Selects values from :attr:`input` at the 1-dimensional indices from :attr:`indices` along the given :attr:`dim`. + +If :attr:`dim` is None, the input array is treated as if it has been flattened to 1d. 
+ +Functions that return indices along a dimension, like :func:`torch.argmax` and :func:`torch.argsort`, +are designed to work with this function. See the examples below. + +.. note:: + This function is similar to NumPy's `take_along_axis`. + See also :func:`torch.gather`. + +Args: + {input} + indices (tensor): the indices into :attr:`input`. Must have long dtype. + dim (int, optional): dimension to select along. + +Keyword args: + {out} + +Example:: + + >>> t = torch.tensor([[10, 30, 20], [60, 40, 50]]) + >>> max_idx = torch.argmax(t) + >>> torch.take_along_dim(t, max_idx) + tensor([60]) + >>> sorted_idx = torch.argsort(t, dim=1) + >>> torch.take_along_dim(t, sorted_idx, dim=1) + tensor([[10, 20, 30], + [40, 50, 60]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.tan, + r""" +tan(input, *, out=None) -> Tensor + +Returns a new tensor with the tangent of the elements of :attr:`input`. + +.. math:: + \text{out}_{i} = \tan(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([-1.2027, -1.7687, 0.4412, -1.3856]) + >>> torch.tan(a) + tensor([-2.5930, 4.9859, 0.4722, -5.3366]) +""".format( + **common_args + ), +) + +add_docstr( + torch.tanh, + r""" +tanh(input, *, out=None) -> Tensor + +Returns a new tensor with the hyperbolic tangent of the elements +of :attr:`input`. + +.. math:: + \text{out}_{i} = \tanh(\text{input}_{i}) +""" + + r""" +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 0.8986, -0.7279, 1.1745, 0.2611]) + >>> torch.tanh(a) + tensor([ 0.7156, -0.6218, 0.8257, 0.2553]) +""".format( + **common_args + ), +) + +add_docstr( + # torch.softmax doc str. Point this to torch.nn.functional.softmax + torch.softmax, + r""" +softmax(input, dim, *, dtype=None) -> Tensor + +Alias for :func:`torch.nn.functional.softmax`. +""", +) + +add_docstr( + torch.topk, + r""" +topk(input, k, dim=None, largest=True, sorted=True, *, out=None) -> (Tensor, LongTensor) + +Returns the :attr:`k` largest elements of the given :attr:`input` tensor along +a given dimension. + +If :attr:`dim` is not given, the last dimension of the `input` is chosen. + +If :attr:`largest` is ``False`` then the `k` smallest elements are returned. + +A namedtuple of `(values, indices)` is returned with the `values` and +`indices` of the largest `k` elements of each row of the `input` tensor in the +given dimension `dim`. + +The boolean option :attr:`sorted` if ``True``, will make sure that the returned +`k` elements are themselves sorted + +Args: + {input} + k (int): the k in "top-k" + dim (int, optional): the dimension to sort along + largest (bool, optional): controls whether to return largest or + smallest elements + sorted (bool, optional): controls whether to return the elements + in sorted order + +Keyword args: + out (tuple, optional): the output tuple of (Tensor, LongTensor) that can be + optionally given to be used as output buffers + +Example:: + + >>> x = torch.arange(1., 6.) + >>> x + tensor([ 1., 2., 3., 4., 5.]) + >>> torch.topk(x, 3) + torch.return_types.topk(values=tensor([5., 4., 3.]), indices=tensor([4, 3, 2])) +""".format( + **common_args + ), +) + +add_docstr( + torch.trace, + r""" +trace(input) -> Tensor + +Returns the sum of the elements of the diagonal of the input 2-D matrix. + +Example:: + + >>> x = torch.arange(1., 10.).view(3, 3) + >>> x + tensor([[ 1., 2., 3.], + [ 4., 5., 6.], + [ 7., 8., 9.]]) + >>> torch.trace(x) + tensor(15.) 
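+
+Equivalently (an illustrative cross-check, reusing ``x`` from the example above),
+the trace is the sum of :func:`torch.diagonal`::
+
+    >>> torch.diagonal(x).sum()
+    tensor(15.)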
+""", +) + +add_docstr( + torch.transpose, + r""" +transpose(input, dim0, dim1) -> Tensor + +Returns a tensor that is a transposed version of :attr:`input`. +The given dimensions :attr:`dim0` and :attr:`dim1` are swapped. + +If :attr:`input` is a strided tensor then the resulting :attr:`out` +tensor shares its underlying storage with the :attr:`input` tensor, so +changing the content of one would change the content of the other. + +If :attr:`input` is a :ref:`sparse tensor ` then the +resulting :attr:`out` tensor *does not* share the underlying storage +with the :attr:`input` tensor. + +If :attr:`input` is a :ref:`sparse tensor ` with compressed +layout (SparseCSR, SparseBSR, SparseCSC or SparseBSC) the arguments +:attr:`dim0` and :attr:`dim1` must be both batch dimensions, or must +both be sparse dimensions. The batch dimensions of a sparse tensor are the +dimensions preceding the sparse dimensions. + +.. note:: + Transpositions which interchange the sparse dimensions of a `SparseCSR` + or `SparseCSC` layout tensor will result in the layout changing between + the two options. Transposition of the sparse dimensions of a ` SparseBSR` + or `SparseBSC` layout tensor will likewise generate a result with the + opposite layout. + + +Args: + {input} + dim0 (int): the first dimension to be transposed + dim1 (int): the second dimension to be transposed + +Example:: + + >>> x = torch.randn(2, 3) + >>> x + tensor([[ 1.0028, -0.9893, 0.5809], + [-0.1669, 0.7299, 0.4942]]) + >>> torch.transpose(x, 0, 1) + tensor([[ 1.0028, -0.1669], + [-0.9893, 0.7299], + [ 0.5809, 0.4942]]) + +See also :func:`torch.t`. +""".format( + **common_args + ), +) + +add_docstr( + torch.triangular_solve, + r""" +triangular_solve(b, A, upper=True, transpose=False, unitriangular=False, *, out=None) -> (Tensor, Tensor) + +Solves a system of equations with a square upper or lower triangular invertible matrix :math:`A` +and multiple right-hand sides :math:`b`. + +In symbols, it solves :math:`AX = b` and assumes :math:`A` is square upper-triangular +(or lower-triangular if :attr:`upper`\ `= False`) and does not have zeros on the diagonal. + +`torch.triangular_solve(b, A)` can take in 2D inputs `b, A` or inputs that are +batches of 2D matrices. If the inputs are batches, then returns +batched outputs `X` + +If the diagonal of :attr:`A` contains zeros or elements that are very close to zero and +:attr:`unitriangular`\ `= False` (default) or if the input matrix is badly conditioned, +the result may contain `NaN` s. + +Supports input of float, double, cfloat and cdouble data types. + +.. warning:: + + :func:`torch.triangular_solve` is deprecated in favor of :func:`torch.linalg.solve_triangular` + and will be removed in a future PyTorch release. + :func:`torch.linalg.solve_triangular` has its arguments reversed and does not return a + copy of one of the inputs. + + ``X = torch.triangular_solve(B, A).solution`` should be replaced with + + .. code:: python + + X = torch.linalg.solve_triangular(A, B) + +Args: + b (Tensor): multiple right-hand sides of size :math:`(*, m, k)` where + :math:`*` is zero of more batch dimensions + A (Tensor): the input triangular coefficient matrix of size :math:`(*, m, m)` + where :math:`*` is zero or more batch dimensions + upper (bool, optional): whether :math:`A` is upper or lower triangular. Default: ``True``. + transpose (bool, optional): solves `op(A)X = b` where `op(A) = A^T` if this flag is ``True``, + and `op(A) = A` if it is ``False``. Default: ``False``. 
+ unitriangular (bool, optional): whether :math:`A` is unit triangular. + If True, the diagonal elements of :math:`A` are assumed to be + 1 and not referenced from :math:`A`. Default: ``False``. + +Keyword args: + out ((Tensor, Tensor), optional): tuple of two tensors to write + the output to. Ignored if `None`. Default: `None`. + +Returns: + A namedtuple `(solution, cloned_coefficient)` where `cloned_coefficient` + is a clone of :math:`A` and `solution` is the solution :math:`X` to :math:`AX = b` + (or whatever variant of the system of equations, depending on the keyword arguments.) + +Examples:: + + >>> A = torch.randn(2, 2).triu() + >>> A + tensor([[ 1.1527, -1.0753], + [ 0.0000, 0.7986]]) + >>> b = torch.randn(2, 3) + >>> b + tensor([[-0.0210, 2.3513, -1.5492], + [ 1.5429, 0.7403, -1.0243]]) + >>> torch.triangular_solve(b, A) + torch.return_types.triangular_solve( + solution=tensor([[ 1.7841, 2.9046, -2.5405], + [ 1.9320, 0.9270, -1.2826]]), + cloned_coefficient=tensor([[ 1.1527, -1.0753], + [ 0.0000, 0.7986]])) +""", +) + +add_docstr( + torch.tril, + r""" +tril(input, diagonal=0, *, out=None) -> Tensor + +Returns the lower triangular part of the matrix (2-D tensor) or batch of matrices +:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. + +The lower triangular part of the matrix is defined as the elements on and +below the diagonal. + +The argument :attr:`diagonal` controls which diagonal to consider. If +:attr:`diagonal` = 0, all elements on and below the main diagonal are +retained. A positive value includes just as many diagonals above the main +diagonal, and similarly a negative value excludes just as many diagonals below +the main diagonal. The main diagonal are the set of indices +:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where +:math:`d_{1}, d_{2}` are the dimensions of the matrix. +""" + + r""" +Args: + {input} + diagonal (int, optional): the diagonal to consider + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[-1.0813, -0.8619, 0.7105], + [ 0.0935, 0.1380, 2.2112], + [-0.3409, -0.9828, 0.0289]]) + >>> torch.tril(a) + tensor([[-1.0813, 0.0000, 0.0000], + [ 0.0935, 0.1380, 0.0000], + [-0.3409, -0.9828, 0.0289]]) + + >>> b = torch.randn(4, 6) + >>> b + tensor([[ 1.2219, 0.5653, -0.2521, -0.2345, 1.2544, 0.3461], + [ 0.4785, -0.4477, 0.6049, 0.6368, 0.8775, 0.7145], + [ 1.1502, 3.2716, -1.1243, -0.5413, 0.3615, 0.6864], + [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0978]]) + >>> torch.tril(b, diagonal=1) + tensor([[ 1.2219, 0.5653, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.4785, -0.4477, 0.6049, 0.0000, 0.0000, 0.0000], + [ 1.1502, 3.2716, -1.1243, -0.5413, 0.0000, 0.0000], + [-0.0614, -0.7344, -1.3164, -0.7648, -1.4024, 0.0000]]) + >>> torch.tril(b, diagonal=-1) + tensor([[ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 0.4785, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [ 1.1502, 3.2716, 0.0000, 0.0000, 0.0000, 0.0000], + [-0.0614, -0.7344, -1.3164, 0.0000, 0.0000, 0.0000]]) +""".format( + **common_args + ), +) + +# docstr is split in two parts to avoid format mis-captureing :math: braces '{}' +# as common args. 
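+# Illustrative sketch (not part of the rendered docs): the coordinates returned by
+# torch.tril_indices are exactly the nonzero positions of the mask that torch.tril
+# produces on a matrix of ones. Names (rows, cols, idx, mask) are local to this sketch.
+#
+#     rows, cols = 4, 3
+#     idx = torch.tril_indices(rows, cols)              # shape (2, N), dtype torch.long
+#     mask = torch.tril(torch.ones(rows, cols)).bool()  # lower-triangular boolean mask
+#     assert torch.equal(idx, mask.nonzero().t())       # same coordinates, row-major order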
+add_docstr( + torch.tril_indices, + r""" +tril_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor + +Returns the indices of the lower triangular part of a :attr:`row`-by- +:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row +coordinates of all indices and the second row contains column coordinates. +Indices are ordered based on rows and then columns. + +The lower triangular part of the matrix is defined as the elements on and +below the diagonal. + +The argument :attr:`offset` controls which diagonal to consider. If +:attr:`offset` = 0, all elements on and below the main diagonal are +retained. A positive value includes just as many diagonals above the main +diagonal, and similarly a negative value excludes just as many diagonals below +the main diagonal. The main diagonal are the set of indices +:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` +where :math:`d_{1}, d_{2}` are the dimensions of the matrix. + +.. note:: + When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to + prevent overflow during calculation. +""" + + r""" +Args: + row (``int``): number of rows in the 2-D matrix. + col (``int``): number of columns in the 2-D matrix. + offset (``int``): diagonal offset from the main diagonal. + Default: if not provided, 0. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, ``torch.long``. + {device} + layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. + +Example:: + + >>> a = torch.tril_indices(3, 3) + >>> a + tensor([[0, 1, 1, 2, 2, 2], + [0, 0, 1, 0, 1, 2]]) + + >>> a = torch.tril_indices(4, 3, -1) + >>> a + tensor([[1, 2, 2, 3, 3, 3], + [0, 0, 1, 0, 1, 2]]) + + >>> a = torch.tril_indices(4, 3, 1) + >>> a + tensor([[0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3], + [0, 1, 0, 1, 2, 0, 1, 2, 0, 1, 2]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.triu, + r""" +triu(input, diagonal=0, *, out=None) -> Tensor + +Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices +:attr:`input`, the other elements of the result tensor :attr:`out` are set to 0. + +The upper triangular part of the matrix is defined as the elements on and +above the diagonal. + +The argument :attr:`diagonal` controls which diagonal to consider. If +:attr:`diagonal` = 0, all elements on and above the main diagonal are +retained. A positive value excludes just as many diagonals above the main +diagonal, and similarly a negative value includes just as many diagonals below +the main diagonal. The main diagonal are the set of indices +:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where +:math:`d_{1}, d_{2}` are the dimensions of the matrix. 
+""" + + r""" +Args: + {input} + diagonal (int, optional): the diagonal to consider + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(3, 3) + >>> a + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.2072, -1.0680, 0.6602], + [ 0.3480, -0.5211, -0.4573]]) + >>> torch.triu(a) + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.0000, -1.0680, 0.6602], + [ 0.0000, 0.0000, -0.4573]]) + >>> torch.triu(a, diagonal=1) + tensor([[ 0.0000, 0.5207, 2.0049], + [ 0.0000, 0.0000, 0.6602], + [ 0.0000, 0.0000, 0.0000]]) + >>> torch.triu(a, diagonal=-1) + tensor([[ 0.2309, 0.5207, 2.0049], + [ 0.2072, -1.0680, 0.6602], + [ 0.0000, -0.5211, -0.4573]]) + + >>> b = torch.randn(4, 6) + >>> b + tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.4333, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], + [-0.9888, 1.0679, -1.3337, -1.6556, 0.4798, 0.2830]]) + >>> torch.triu(b, diagonal=1) + tensor([[ 0.0000, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [ 0.0000, 0.0000, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.0000, 0.0000, 0.0000, -1.0432, 0.9348, -0.4410], + [ 0.0000, 0.0000, 0.0000, 0.0000, 0.4798, 0.2830]]) + >>> torch.triu(b, diagonal=-1) + tensor([[ 0.5876, -0.0794, -1.8373, 0.6654, 0.2604, 1.5235], + [-0.2447, 0.9556, -1.2919, 1.3378, -0.1768, -1.0857], + [ 0.0000, 0.3146, 0.6576, -1.0432, 0.9348, -0.4410], + [ 0.0000, 0.0000, -1.3337, -1.6556, 0.4798, 0.2830]]) +""".format( + **common_args + ), +) + +# docstr is split in two parts to avoid format mis-capturing :math: braces '{}' +# as common args. +add_docstr( + torch.triu_indices, + r""" +triu_indices(row, col, offset=0, *, dtype=torch.long, device='cpu', layout=torch.strided) -> Tensor + +Returns the indices of the upper triangular part of a :attr:`row` by +:attr:`col` matrix in a 2-by-N Tensor, where the first row contains row +coordinates of all indices and the second row contains column coordinates. +Indices are ordered based on rows and then columns. + +The upper triangular part of the matrix is defined as the elements on and +above the diagonal. + +The argument :attr:`offset` controls which diagonal to consider. If +:attr:`offset` = 0, all elements on and above the main diagonal are +retained. A positive value excludes just as many diagonals above the main +diagonal, and similarly a negative value includes just as many diagonals below +the main diagonal. The main diagonal are the set of indices +:math:`\lbrace (i, i) \rbrace` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` +where :math:`d_{1}, d_{2}` are the dimensions of the matrix. + +.. note:: + When running on CUDA, ``row * col`` must be less than :math:`2^{59}` to + prevent overflow during calculation. +""" + + r""" +Args: + row (``int``): number of rows in the 2-D matrix. + col (``int``): number of columns in the 2-D matrix. + offset (``int``): diagonal offset from the main diagonal. + Default: if not provided, 0. + +Keyword args: + dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor. + Default: if ``None``, ``torch.long``. + {device} + layout (:class:`torch.layout`, optional): currently only support ``torch.strided``. 
+ +Example:: + + >>> a = torch.triu_indices(3, 3) + >>> a + tensor([[0, 0, 0, 1, 1, 2], + [0, 1, 2, 1, 2, 2]]) + + >>> a = torch.triu_indices(4, 3, -1) + >>> a + tensor([[0, 0, 0, 1, 1, 1, 2, 2, 3], + [0, 1, 2, 0, 1, 2, 1, 2, 2]]) + + >>> a = torch.triu_indices(4, 3, 1) + >>> a + tensor([[0, 0, 1], + [1, 2, 2]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.true_divide, + r""" +true_divide(dividend, divisor, *, out) -> Tensor + +Alias for :func:`torch.div` with ``rounding_mode=None``. +""", +) + +add_docstr( + torch.trunc, + r""" +trunc(input, *, out=None) -> Tensor + +Returns a new tensor with the truncated integer values of +the elements of :attr:`input`. + +For integer inputs, follows the array-api convention of returning a +copy of the input tensor. + +Args: + {input} + +Keyword args: + {out} + +Example:: + + >>> a = torch.randn(4) + >>> a + tensor([ 3.4742, 0.5466, -0.8008, -0.9079]) + >>> torch.trunc(a) + tensor([ 3., 0., -0., -0.]) +""".format( + **common_args + ), +) + +add_docstr( + torch.fake_quantize_per_tensor_affine, + r""" +fake_quantize_per_tensor_affine(input, scale, zero_point, quant_min, quant_max) -> Tensor + +Returns a new tensor with the data in :attr:`input` fake quantized using :attr:`scale`, +:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`. + +.. math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + +Args: + input (Tensor): the input value(s), ``torch.float32`` tensor + scale (double scalar or ``float32`` Tensor): quantization scale + zero_point (int64 scalar or ``int32`` Tensor): quantization zero_point + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + +Returns: + Tensor: A newly fake_quantized ``torch.float32`` tensor + +Example:: + + >>> x = torch.randn(4) + >>> x + tensor([ 0.0552, 0.9730, 0.3973, -1.0780]) + >>> torch.fake_quantize_per_tensor_affine(x, 0.1, 0, 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) + >>> torch.fake_quantize_per_tensor_affine(x, torch.tensor(0.1), torch.tensor(0), 0, 255) + tensor([0.1000, 1.0000, 0.4000, 0.0000]) +""", +) + +add_docstr( + torch.fake_quantize_per_channel_affine, + r""" +fake_quantize_per_channel_affine(input, scale, zero_point, axis, quant_min, quant_max) -> Tensor + +Returns a new tensor with the data in :attr:`input` fake quantized per channel using :attr:`scale`, +:attr:`zero_point`, :attr:`quant_min` and :attr:`quant_max`, across the channel specified by :attr:`axis`. + +.. 
math:: + \text{output} = ( + min( + \text{quant\_max}, + max( + \text{quant\_min}, + \text{std::nearby\_int}(\text{input} / \text{scale}) + \text{zero\_point} + ) + ) - \text{zero\_point} + ) \times \text{scale} + +Args: + input (Tensor): the input value(s), in ``torch.float32`` + scale (Tensor): quantization scale, per channel in ``torch.float32`` + zero_point (Tensor): quantization zero_point, per channel in ``torch.int32`` or ``torch.half`` or ``torch.float32`` + axis (int32): channel axis + quant_min (int64): lower bound of the quantized domain + quant_max (int64): upper bound of the quantized domain + +Returns: + Tensor: A newly fake_quantized per channel ``torch.float32`` tensor + +Example:: + + >>> x = torch.randn(2, 2, 2) + >>> x + tensor([[[-0.2525, -0.0466], + [ 0.3491, -0.2168]], + + [[-0.5906, 1.6258], + [ 0.6444, -0.0542]]]) + >>> scales = (torch.randn(2) + 1) * 0.05 + >>> scales + tensor([0.0475, 0.0486]) + >>> zero_points = torch.zeros(2).to(torch.int32) + >>> zero_points + tensor([0, 0]) + >>> torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255) + tensor([[[0.0000, 0.0000], + [0.3405, 0.0000]], + + [[0.0000, 1.6134], + [0.6323, 0.0000]]]) +""", +) + +add_docstr( + torch.fix, + r""" +fix(input, *, out=None) -> Tensor + +Alias for :func:`torch.trunc` +""", +) + +add_docstr( + torch.unsqueeze, + r""" +unsqueeze(input, dim) -> Tensor + +Returns a new tensor with a dimension of size one inserted at the +specified position. + +The returned tensor shares the same underlying data with this tensor. + +A :attr:`dim` value within the range ``[-input.dim() - 1, input.dim() + 1)`` +can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze` +applied at :attr:`dim` = ``dim + input.dim() + 1``. + +Args: + {input} + dim (int): the index at which to insert the singleton dimension + +Example:: + + >>> x = torch.tensor([1, 2, 3, 4]) + >>> torch.unsqueeze(x, 0) + tensor([[ 1, 2, 3, 4]]) + >>> torch.unsqueeze(x, 1) + tensor([[ 1], + [ 2], + [ 3], + [ 4]]) +""".format( + **common_args + ), +) + +add_docstr( + torch.var, + r""" +var(input, dim=None, *, correction=1, keepdim=False, out=None) -> Tensor + +Calculates the variance over the dimensions specified by :attr:`dim`. :attr:`dim` +can be a single dimension, list of dimensions, or ``None`` to reduce over all +dimensions. + +The variance (:math:`\sigma^2`) is calculated as + +.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + +where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +sample mean, :math:`N` is the number of samples and :math:`\delta N` is +the :attr:`correction`. +""" + + r""" + +{keepdim_details} + +Args: + {input} + {opt_dim} + +Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + {keepdim} + {out} + +Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var(a, dim=1, keepdim=True) + tensor([[1.0631], + [0.5590], + [1.4893], + [0.8258]]) + +.. 
_Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.var_mean, + r""" +var_mean(input, dim=None, *, correction=1, keepdim=False, out=None) -> (Tensor, Tensor) + +Calculates the variance and mean over the dimensions specified by :attr:`dim`. +:attr:`dim` can be a single dimension, list of dimensions, or ``None`` to +reduce over all dimensions. + +The variance (:math:`\sigma^2`) is calculated as + +.. math:: \sigma^2 = \frac{1}{\max(0,~N - \delta N)}\sum_{i=0}^{N-1}(x_i-\bar{x})^2 + +where :math:`x` is the sample set of elements, :math:`\bar{x}` is the +sample mean, :math:`N` is the number of samples and :math:`\delta N` is +the :attr:`correction`. +""" + + r""" + +{keepdim_details} + +Args: + {input} + {opt_dim} + +Keyword args: + correction (int): difference between the sample size and sample degrees of freedom. + Defaults to `Bessel's correction`_, ``correction=1``. + + .. versionchanged:: 2.0 + Previously this argument was called ``unbiased`` and was a boolean + with ``True`` corresponding to ``correction=1`` and ``False`` being + ``correction=0``. + {keepdim} + {out} + +Returns: + A tuple (var, mean) containing the variance and mean. + +Example: + + >>> a = torch.tensor( + ... [[ 0.2035, 1.2959, 1.8101, -0.4644], + ... [ 1.5027, -0.3270, 0.5905, 0.6538], + ... [-1.5745, 1.3330, -0.5596, -0.6548], + ... [ 0.1264, -0.5080, 1.6420, 0.1992]]) + >>> torch.var_mean(a, dim=0, keepdim=True) + (tensor([[1.5926, 1.0056, 1.2005, 0.3646]]), + tensor([[ 0.0645, 0.4485, 0.8707, -0.0665]])) + +.. _Bessel's correction: https://en.wikipedia.org/wiki/Bessel%27s_correction + +""".format( + **multi_dim_common + ), +) + +add_docstr( + torch.zeros, + r""" +zeros(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Returns a tensor filled with the scalar value `0`, with the shape defined +by the variable argument :attr:`size`. + +Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + +Keyword args: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.zeros(2, 3) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) + + >>> torch.zeros(5) + tensor([ 0., 0., 0., 0., 0.]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.zeros_like, + r""" +zeros_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns a tensor filled with the scalar value `0`, with the same size as +:attr:`input`. ``torch.zeros_like(input)`` is equivalent to +``torch.zeros(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +.. warning:: + As of 0.4, this function does not support an :attr:`out` keyword. As an alternative, + the old ``torch.zeros_like(input, out=output)`` is equivalent to + ``torch.zeros(input.size(), out=output)``. + +Args: + {input} + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +Example:: + + >>> input = torch.empty(2, 3) + >>> torch.zeros_like(input) + tensor([[ 0., 0., 0.], + [ 0., 0., 0.]]) +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.empty, + """ +empty(*size, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False, \ +memory_format=torch.contiguous_format) -> Tensor + +Returns a tensor filled with uninitialized data. 
The shape of the tensor is +defined by the variable argument :attr:`size`. + +.. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + +Args: + size (int...): a sequence of integers defining the shape of the output tensor. + Can be a variable number of arguments or a collection like a list or tuple. + +Keyword args: + {out} + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + {memory_format} + +Example:: + + >>> torch.empty((2,3), dtype=torch.int64) + tensor([[ 9.4064e+13, 2.8000e+01, 9.3493e+13], + [ 7.5751e+18, 7.1428e+18, 7.5955e+18]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.empty_like, + r""" +empty_like(input, *, dtype=None, layout=None, device=None, requires_grad=False, memory_format=torch.preserve_format) -> Tensor + +Returns an uninitialized tensor with the same size as :attr:`input`. +``torch.empty_like(input)`` is equivalent to +``torch.empty(input.size(), dtype=input.dtype, layout=input.layout, device=input.device)``. + +.. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + +Args: + {input} + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} + +Example:: + + >>> a=torch.empty((2,3), dtype=torch.int32, device = 'cuda') + >>> torch.empty_like(a) + tensor([[0, 0, 0], + [0, 0, 0]], device='cuda:0', dtype=torch.int32) +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.empty_strided, + r""" +empty_strided(size, stride, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + +Creates a tensor with the specified :attr:`size` and :attr:`stride` and filled with undefined data. + +.. warning:: + If the constructed tensor is "overlapped" (with multiple indices referring to the same element + in memory) its behavior is undefined. + +.. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. 
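+
+.. note::
+    Element ``[i][j]`` of the returned 2-D tensor lives at offset
+    ``i * stride[0] + j * stride[1]`` in the underlying storage. As an illustrative
+    check, strides of ``(3, 1)`` for a ``(2, 3)`` size reproduce the contiguous layout::
+
+        >>> torch.empty_strided((2, 3), (3, 1)).is_contiguous()
+        True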
+ +Args: + size (tuple of int): the shape of the output tensor + stride (tuple of int): the strides of the output tensor + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + +Example:: + + >>> a = torch.empty_strided((2, 3), (1, 2)) + >>> a + tensor([[8.9683e-44, 4.4842e-44, 5.1239e+07], + [0.0000e+00, 0.0000e+00, 3.0705e-41]]) + >>> a.stride() + (1, 2) + >>> a.size() + torch.Size([2, 3]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.empty_permuted, + r""" +empty_permuted(size, physical_layout, *, dtype=None, layout=None, device=None, requires_grad=False, pin_memory=False) -> Tensor + +Creates an uninitialized, non-overlapping and dense tensor with the +specified :attr:`size`, with :attr:`physical_layout` specifying how the +dimensions are physically laid out in memory (each logical dimension is listed +from outermost to innermost). :attr:`physical_layout` is a generalization +of NCHW/NHWC notation: if each dimension is assigned a number according to +what order they occur in size (N=0, C=1, H=2, W=3), then NCHW is ``(0, 1, 2, 3)`` +while NHWC is ``(0, 2, 3, 1)``. Equivalently, the strides of the output +tensor ``t`` are such that ``t.stride(physical_layout[i]) == contiguous_strides[i]`` +(notably, this function is *not* equivalent to ``torch.empty(size).permute(physical_layout)``). + +Unlike :func:`torch.empty_strided`, this is guaranteed to produce a dense +tensor with no overlaps. If possible, prefer using this function over +:func:`torch.empty_strided` or manual use of :func:`torch.as_strided`. + +.. note:: + If :func:`torch.use_deterministic_algorithms()` and + :attr:`torch.utils.deterministic.fill_uninitialized_memory` are both set to + ``True``, the output tensor is initialized to prevent any possible + nondeterministic behavior from using the data as an input to an operation. + Floating point and complex tensors are filled with NaN, and integer tensors + are filled with the maximum value. + +Args: + size (tuple of int): the shape of the output tensor + physical_layout (tuple of int): the ordering of dimensions physically in memory + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {pin_memory} + +Examples: + + >>> torch.empty((2, 3, 5, 7)).stride() + (105, 35, 7, 1) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 1, 2, 3)).stride() + (105, 35, 7, 1) + >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).stride() + (105, 1, 21, 3) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).stride() + (105, 1, 21, 3) + >>> torch.empty_permuted((2, 3, 5, 7), (0, 2, 3, 1)).dim_order() + (0, 2, 3, 1) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.full, + r""" +full(size, fill_value, *, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False) -> Tensor + +Creates a tensor of size :attr:`size` filled with :attr:`fill_value`. The +tensor's dtype is inferred from :attr:`fill_value`. + +Args: + size (int...): a list, tuple, or :class:`torch.Size` of integers defining the + shape of the output tensor. + fill_value (Scalar): the value to fill the output tensor with. 
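+
+.. note::
+    As an illustration of the dtype inference mentioned above (assuming the default
+    dtype has not been changed), an integral ``fill_value`` produces an integer
+    tensor while a float produces a floating point one::
+
+        >>> torch.full((2,), 7).dtype
+        torch.int64
+        >>> torch.full((2,), 7.).dtype
+        torch.float32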
+ +Keyword args: + {out} + {dtype} + {layout} + {device} + {requires_grad} + +Example:: + + >>> torch.full((2, 3), 3.141592) + tensor([[ 3.1416, 3.1416, 3.1416], + [ 3.1416, 3.1416, 3.1416]]) +""".format( + **factory_common_args + ), +) + +add_docstr( + torch.full_like, + """ +full_like(input, fill_value, \\*, dtype=None, layout=torch.strided, device=None, requires_grad=False, \ +memory_format=torch.preserve_format) -> Tensor + +Returns a tensor with the same size as :attr:`input` filled with :attr:`fill_value`. +``torch.full_like(input, fill_value)`` is equivalent to +``torch.full(input.size(), fill_value, dtype=input.dtype, layout=input.layout, device=input.device)``. + +Args: + {input} + fill_value: the number to fill the output tensor with. + +Keyword args: + {dtype} + {layout} + {device} + {requires_grad} + {memory_format} +""".format( + **factory_like_common_args + ), +) + +add_docstr( + torch.det, + r""" +det(input) -> Tensor + +Alias for :func:`torch.linalg.det` +""", +) + +add_docstr( + torch.where, + r""" +where(condition, input, other, *, out=None) -> Tensor + +Return a tensor of elements selected from either :attr:`input` or :attr:`other`, depending on :attr:`condition`. + +The operation is defined as: + +.. math:: + \text{out}_i = \begin{cases} + \text{input}_i & \text{if } \text{condition}_i \\ + \text{other}_i & \text{otherwise} \\ + \end{cases} +""" + + r""" +.. note:: + The tensors :attr:`condition`, :attr:`input`, :attr:`other` must be :ref:`broadcastable `. + +Arguments: + condition (BoolTensor): When True (nonzero), yield input, otherwise yield other + input (Tensor or Scalar): value (if :attr:`input` is a scalar) or values selected at indices + where :attr:`condition` is ``True`` + other (Tensor or Scalar): value (if :attr:`other` is a scalar) or values selected at indices + where :attr:`condition` is ``False`` + +Keyword args: + {out} + +Returns: + Tensor: A tensor of shape equal to the broadcasted shape of :attr:`condition`, :attr:`input`, :attr:`other` + +Example:: + + >>> x = torch.randn(3, 2) + >>> y = torch.ones(3, 2) + >>> x + tensor([[-0.4620, 0.3139], + [ 0.3898, -0.7197], + [ 0.0478, -0.1657]]) + >>> torch.where(x > 0, 1.0, 0.0) + tensor([[0., 1.], + [1., 0.], + [1., 0.]]) + >>> torch.where(x > 0, x, y) + tensor([[ 1.0000, 0.3139], + [ 0.3898, 1.0000], + [ 0.0478, 1.0000]]) + >>> x = torch.randn(2, 2, dtype=torch.double) + >>> x + tensor([[ 1.0779, 0.0383], + [-0.8785, -1.1089]], dtype=torch.float64) + >>> torch.where(x > 0, x, 0.) + tensor([[1.0779, 0.0383], + [0.0000, 0.0000]], dtype=torch.float64) + +.. function:: where(condition) -> tuple of LongTensor + :noindex: + +``torch.where(condition)`` is identical to +``torch.nonzero(condition, as_tuple=True)``. + +.. note:: + See also :func:`torch.nonzero`. +""".format( + **common_args + ), +) + +add_docstr( + torch.logdet, + r""" +logdet(input) -> Tensor + +Calculates log determinant of a square matrix or batches of square matrices. + +It returns ``-inf`` if the input has a determinant of zero, and ``NaN`` if it has +a negative determinant. + +.. note:: + Backward through :meth:`logdet` internally uses SVD results when :attr:`input` + is not invertible. In this case, double backward through :meth:`logdet` will + be unstable in when :attr:`input` doesn't have distinct singular values. See + :func:`torch.linalg.svd` for details. + +.. seealso:: + + :func:`torch.linalg.slogdet` computes the sign (resp. angle) and natural logarithm of the + absolute value of the determinant of real-valued (resp. 
complex) square matrices. + +Arguments: + input (Tensor): the input tensor of size ``(*, n, n)`` where ``*`` is zero or more + batch dimensions. + +Example:: + + >>> A = torch.randn(3, 3) + >>> torch.det(A) + tensor(0.2611) + >>> torch.logdet(A) + tensor(-1.3430) + >>> A + tensor([[[ 0.9254, -0.6213], + [-0.5787, 1.6843]], + + [[ 0.3242, -0.9665], + [ 0.4539, -0.0887]], + + [[ 1.1336, -0.4025], + [-0.7089, 0.9032]]]) + >>> A.det() + tensor([1.1990, 0.4099, 0.7386]) + >>> A.det().log() + tensor([ 0.1815, -0.8917, -0.3031]) +""", +) + +add_docstr( + torch.slogdet, + r""" +slogdet(input) -> (Tensor, Tensor) + +Alias for :func:`torch.linalg.slogdet` +""", +) + +add_docstr( + torch.pinverse, + r""" +pinverse(input, rcond=1e-15) -> Tensor + +Alias for :func:`torch.linalg.pinv` +""", +) + +add_docstr( + torch.hann_window, + """ +hann_window(window_length, periodic=True, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Hann window function. + +.. math:: + w[n] = \frac{1}{2}\ \left[1 - \cos \left( \frac{2 \pi n}{N - 1} \right)\right] = + \sin^2 \left( \frac{\pi n}{N - 1} \right), + +where :math:`N` is the full window size. + +The input :attr:`window_length` is a positive integer controlling the +returned window size. :attr:`periodic` flag determines whether the returned +window trims off the last duplicate value from the symmetric window and is +ready to be used as a periodic window with functions like +:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +``torch.hann_window(L, periodic=True)`` equal to +``torch.hann_window(L + 1, periodic=False)[:-1])``. + +.. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. +""" + + r""" +Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + +Keyword args: + {dtype} Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + {device} + {requires_grad} + +Returns: + Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.hamming_window, + """ +hamming_window(window_length, periodic=True, alpha=0.54, beta=0.46, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Hamming window function. + +.. math:: + w[n] = \alpha - \beta\ \cos \left( \frac{2 \pi n}{N - 1} \right), + +where :math:`N` is the full window size. + +The input :attr:`window_length` is a positive integer controlling the +returned window size. :attr:`periodic` flag determines whether the returned +window trims off the last duplicate value from the symmetric window and is +ready to be used as a periodic window with functions like +:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +``torch.hamming_window(L, periodic=True)`` equal to +``torch.hamming_window(L + 1, periodic=False)[:-1])``. + +.. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. + +.. note:: + This is a generalized version of :meth:`torch.hann_window`. 
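+
+.. note::
+    An illustrative check of the relationship above: with
+    :math:`\alpha = \beta = 0.5` the Hamming window reduces to the Hann window::
+
+        >>> torch.allclose(torch.hamming_window(10, alpha=0.5, beta=0.5),
+        ...                torch.hann_window(10))
+        True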
+""" + + r""" +Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + alpha (float, optional): The coefficient :math:`\alpha` in the equation above + beta (float, optional): The coefficient :math:`\beta` in the equation above + +Keyword args: + {dtype} Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + {device} + {requires_grad} + +Returns: + Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window. + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.bartlett_window, + """ +bartlett_window(window_length, periodic=True, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Bartlett window function. + +.. math:: + w[n] = 1 - \left| \frac{2n}{N-1} - 1 \right| = \begin{cases} + \frac{2n}{N - 1} & \text{if } 0 \leq n \leq \frac{N - 1}{2} \\ + 2 - \frac{2n}{N - 1} & \text{if } \frac{N - 1}{2} < n < N \\ + \end{cases}, + +where :math:`N` is the full window size. + +The input :attr:`window_length` is a positive integer controlling the +returned window size. :attr:`periodic` flag determines whether the returned +window trims off the last duplicate value from the symmetric window and is +ready to be used as a periodic window with functions like +:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +``torch.bartlett_window(L, periodic=True)`` equal to +``torch.bartlett_window(L + 1, periodic=False)[:-1])``. + +.. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. +""" + + r""" +Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + +Keyword args: + {dtype} Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + {device} + {requires_grad} + +Returns: + Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.blackman_window, + """ +blackman_window(window_length, periodic=True, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Blackman window function. + +.. math:: + w[n] = 0.42 - 0.5 \cos \left( \frac{2 \pi n}{N - 1} \right) + 0.08 \cos \left( \frac{4 \pi n}{N - 1} \right) + +where :math:`N` is the full window size. + +The input :attr:`window_length` is a positive integer controlling the +returned window size. :attr:`periodic` flag determines whether the returned +window trims off the last duplicate value from the symmetric window and is +ready to be used as a periodic window with functions like +:meth:`torch.stft`. Therefore, if :attr:`periodic` is true, the :math:`N` in +above formula is in fact :math:`\text{window\_length} + 1`. Also, we always have +``torch.blackman_window(L, periodic=True)`` equal to +``torch.blackman_window(L + 1, periodic=False)[:-1])``. + +.. note:: + If :attr:`window_length` :math:`=1`, the returned window contains a single value 1. 
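+
+.. note::
+    The relationship between the periodic and symmetric forms stated above can be
+    checked directly (illustrative)::
+
+        >>> torch.allclose(torch.blackman_window(10, periodic=True),
+        ...                torch.blackman_window(11, periodic=False)[:-1])
+        True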
+""" + + r""" +Arguments: + window_length (int): the size of returned window + periodic (bool, optional): If True, returns a window to be used as periodic + function. If False, return a symmetric window. + +Keyword args: + {dtype} Only floating point types are supported. + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + {device} + {requires_grad} + +Returns: + Tensor: A 1-D tensor of size :math:`(\text{{window\_length}},)` containing the window + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.kaiser_window, + """ +kaiser_window(window_length, periodic=True, beta=12.0, *, dtype=None, \ +layout=torch.strided, device=None, requires_grad=False) -> Tensor +""" + + r""" +Computes the Kaiser window with window length :attr:`window_length` and shape parameter :attr:`beta`. + +Let I_0 be the zeroth order modified Bessel function of the first kind (see :func:`torch.i0`) and +``N = L - 1`` if :attr:`periodic` is False and ``L`` if :attr:`periodic` is True, +where ``L`` is the :attr:`window_length`. This function computes: + +.. math:: + out_i = I_0 \left( \beta \sqrt{1 - \left( {\frac{i - N/2}{N/2}} \right) ^2 } \right) / I_0( \beta ) + +Calling ``torch.kaiser_window(L, B, periodic=True)`` is equivalent to calling +``torch.kaiser_window(L + 1, B, periodic=False)[:-1])``. +The :attr:`periodic` argument is intended as a helpful shorthand +to produce a periodic window as input to functions like :func:`torch.stft`. + +.. note:: + If :attr:`window_length` is one, then the returned window is a single element tensor containing a one. + +""" + + r""" +Args: + window_length (int): length of the window. + periodic (bool, optional): If True, returns a periodic window suitable for use in spectral analysis. + If False, returns a symmetric window suitable for use in filter design. + beta (float, optional): shape parameter for the window. + +Keyword args: + {dtype} + layout (:class:`torch.layout`, optional): the desired layout of returned window tensor. Only + ``torch.strided`` (dense layout) is supported. + {device} + {requires_grad} + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.vander, + """ +vander(x, N=None, increasing=False) -> Tensor +""" + + r""" +Generates a Vandermonde matrix. + +The columns of the output matrix are elementwise powers of the input vector :math:`x^{{(N-1)}}, x^{{(N-2)}}, ..., x^0`. +If increasing is True, the order of the columns is reversed :math:`x^0, x^1, ..., x^{{(N-1)}}`. Such a +matrix with a geometric progression in each row is named for Alexandre-Theophile Vandermonde. + +Arguments: + x (Tensor): 1-D input tensor. + N (int, optional): Number of columns in the output. If N is not specified, + a square array is returned :math:`(N = len(x))`. + increasing (bool, optional): Order of the powers of the columns. If True, + the powers increase from left to right, if False (the default) they are reversed. + +Returns: + Tensor: Vandermonde matrix. If increasing is False, the first column is :math:`x^{{(N-1)}}`, + the second :math:`x^{{(N-2)}}` and so forth. If increasing is True, the columns + are :math:`x^0, x^1, ..., x^{{(N-1)}}`. 
+ +Example:: + + >>> x = torch.tensor([1, 2, 3, 5]) + >>> torch.vander(x) + tensor([[ 1, 1, 1, 1], + [ 8, 4, 2, 1], + [ 27, 9, 3, 1], + [125, 25, 5, 1]]) + >>> torch.vander(x, N=3) + tensor([[ 1, 1, 1], + [ 4, 2, 1], + [ 9, 3, 1], + [25, 5, 1]]) + >>> torch.vander(x, N=3, increasing=True) + tensor([[ 1, 1, 1], + [ 1, 2, 4], + [ 1, 3, 9], + [ 1, 5, 25]]) + +""".format( + **factory_common_args + ), +) + + +add_docstr( + torch.unbind, + r""" +unbind(input, dim=0) -> seq + +Removes a tensor dimension. + +Returns a tuple of all slices along a given dimension, already without it. + +Arguments: + input (Tensor): the tensor to unbind + dim (int): dimension to remove + +Example:: + + >>> torch.unbind(torch.tensor([[1, 2, 3], + >>> [4, 5, 6], + >>> [7, 8, 9]])) + (tensor([1, 2, 3]), tensor([4, 5, 6]), tensor([7, 8, 9])) +""", +) + + +add_docstr( + torch.combinations, + r""" +combinations(input, r=2, with_replacement=False) -> seq + +Compute combinations of length :math:`r` of the given tensor. The behavior is similar to +python's `itertools.combinations` when `with_replacement` is set to `False`, and +`itertools.combinations_with_replacement` when `with_replacement` is set to `True`. + +Arguments: + input (Tensor): 1D vector. + r (int, optional): number of elements to combine + with_replacement (bool, optional): whether to allow duplication in combination + +Returns: + Tensor: A tensor equivalent to converting all the input tensors into lists, do + `itertools.combinations` or `itertools.combinations_with_replacement` on these + lists, and finally convert the resulting list into tensor. + +Example:: + + >>> a = [1, 2, 3] + >>> list(itertools.combinations(a, r=2)) + [(1, 2), (1, 3), (2, 3)] + >>> list(itertools.combinations(a, r=3)) + [(1, 2, 3)] + >>> list(itertools.combinations_with_replacement(a, r=2)) + [(1, 1), (1, 2), (1, 3), (2, 2), (2, 3), (3, 3)] + >>> tensor_a = torch.tensor(a) + >>> torch.combinations(tensor_a) + tensor([[1, 2], + [1, 3], + [2, 3]]) + >>> torch.combinations(tensor_a, r=3) + tensor([[1, 2, 3]]) + >>> torch.combinations(tensor_a, with_replacement=True) + tensor([[1, 1], + [1, 2], + [1, 3], + [2, 2], + [2, 3], + [3, 3]]) + +""", +) + +add_docstr( + torch.trapezoid, + r""" +trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor + +Computes the `trapezoidal rule `_ along +:attr:`dim`. By default the spacing between elements is assumed to be 1, but +:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be +used to specify arbitrary spacing along :attr:`dim`. + + +Assuming :attr:`y` is a one-dimensional tensor with elements :math:`{y_0, y_1, ..., y_n}`, +the default computation is + +.. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{1}{2} (y_i + y_{i-1}) + \end{aligned} + +When :attr:`dx` is specified the computation becomes + +.. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{\Delta x}{2} (y_i + y_{i-1}) + \end{aligned} + +effectively multiplying the result by :attr:`dx`. When :attr:`x` is specified, +assuming :attr:`x` is also a one-dimensional tensor with +elements :math:`{x_0, x_1, ..., x_n}`, the computation becomes + +.. math:: + \begin{aligned} + \sum_{i = 1}^{n-1} \frac{(x_i - x_{i-1})}{2} (y_i + y_{i-1}) + \end{aligned} + +When :attr:`x` and :attr:`y` have the same size, the computation is as described above and no broadcasting is needed. +The broadcasting behavior of this function is as follows when their sizes are different. 
For both :attr:`x`
+and :attr:`y`, the function computes the difference between consecutive elements along
+dimension :attr:`dim`. This effectively creates two tensors, `x_diff` and `y_diff`, that have
+the same shape as the original tensors except their lengths along the dimension :attr:`dim` are reduced by 1.
+After that, those two tensors are broadcast together to compute the final output as part of the trapezoidal rule.
+See the examples below for details.
+
+.. note::
+    The trapezoidal rule is a technique for approximating the definite integral of a function
+    by averaging its left and right Riemann sums. The approximation becomes more accurate as
+    the resolution of the partition increases.
+
+Arguments:
+    y (Tensor): Values to use when computing the trapezoidal rule.
+    x (Tensor): If specified, defines spacing between values as specified above.
+
+Keyword arguments:
+    dx (float): constant spacing between values. If neither :attr:`x` nor :attr:`dx`
+        is specified then this defaults to 1. Effectively multiplies the result by its value.
+    dim (int): The dimension along which to compute the trapezoidal rule.
+        The last (inner-most) dimension by default.
+
+Examples::
+
+    >>> # Computes the trapezoidal rule in 1D, spacing is implicitly 1
+    >>> y = torch.tensor([1, 5, 10])
+    >>> torch.trapezoid(y)
+    tensor(10.5)
+
+    >>> # Computes the same trapezoidal rule directly to verify
+    >>> (1 + 10 + 10) / 2
+    10.5
+
+    >>> # Computes the trapezoidal rule in 1D with constant spacing of 2
+    >>> # NOTE: the result is the same as before, but multiplied by 2
+    >>> torch.trapezoid(y, dx=2)
+    21.0
+
+    >>> # Computes the trapezoidal rule in 1D with arbitrary spacing
+    >>> x = torch.tensor([1, 3, 6])
+    >>> torch.trapezoid(y, x)
+    28.5
+
+    >>> # Computes the same trapezoidal rule directly to verify
+    >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2
+    28.5
+
+    >>> # Computes the trapezoidal rule for each row of a 3x3 matrix
+    >>> y = torch.arange(9).reshape(3, 3)
+    tensor([[0, 1, 2],
+            [3, 4, 5],
+            [6, 7, 8]])
+    >>> torch.trapezoid(y)
+    tensor([ 2., 8., 14.])
+
+    >>> # Computes the trapezoidal rule for each column of the matrix
+    >>> torch.trapezoid(y, dim=0)
+    tensor([ 6., 8., 10.])
+
+    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
+    >>> # with the same arbitrary spacing
+    >>> y = torch.ones(3, 3)
+    >>> x = torch.tensor([1, 3, 6])
+    >>> torch.trapezoid(y, x)
+    tensor([5., 5., 5.])
+
+    >>> # Computes the trapezoidal rule for each row of a 3x3 ones matrix
+    >>> # with different arbitrary spacing per row
+    >>> y = torch.ones(3, 3)
+    >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]])
+    >>> torch.trapezoid(y, x)
+    tensor([2., 4., 6.])
+""",
+)
+
+add_docstr(
+    torch.trapz,
+    r"""
+trapz(y, x, *, dim=-1) -> Tensor
+
+Alias for :func:`torch.trapezoid`.
+""",
+)
+
+add_docstr(
+    torch.cumulative_trapezoid,
+    r"""
+cumulative_trapezoid(y, x=None, *, dx=None, dim=-1) -> Tensor
+
+Cumulatively computes the `trapezoidal rule <https://en.wikipedia.org/wiki/Trapezoidal_rule>`_
+along :attr:`dim`. By default the spacing between elements is assumed to be 1, but
+:attr:`dx` can be used to specify a different constant spacing, and :attr:`x` can be
+used to specify arbitrary spacing along :attr:`dim`.
+
+For more details, please read :func:`torch.trapezoid`. The difference between :func:`torch.trapezoid`
+and this function is that :func:`torch.trapezoid` returns a value for each integration,
+whereas this function returns a cumulative value for every spacing within the integration. 
This +is analogous to how `.sum` returns a value and `.cumsum` returns a cumulative sum. + +Arguments: + y (Tensor): Values to use when computing the trapezoidal rule. + x (Tensor): If specified, defines spacing between values as specified above. + +Keyword arguments: + dx (float): constant spacing between values. If neither :attr:`x` or :attr:`dx` + are specified then this defaults to 1. Effectively multiplies the result by its value. + dim (int): The dimension along which to compute the trapezoidal rule. + The last (inner-most) dimension by default. + +Examples:: + + >>> # Cumulatively computes the trapezoidal rule in 1D, spacing is implicitly 1. + >>> y = torch.tensor([1, 5, 10]) + >>> torch.cumulative_trapezoid(y) + tensor([3., 10.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> (1 + 5) / 2 + 3.0 + >>> (1 + 10 + 10) / 2 + 10.5 + + >>> # Cumulatively computes the trapezoidal rule in 1D with constant spacing of 2 + >>> # NOTE: the result is the same as before, but multiplied by 2 + >>> torch.cumulative_trapezoid(y, dx=2) + tensor([6., 21.]) + + >>> # Cumulatively computes the trapezoidal rule in 1D with arbitrary spacing + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([6., 28.5]) + + >>> # Computes the same trapezoidal rule directly up to each element to verify + >>> ((3 - 1) * (1 + 5)) / 2 + 6.0 + >>> ((3 - 1) * (1 + 5) + (6 - 3) * (5 + 10)) / 2 + 28.5 + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 matrix + >>> y = torch.arange(9).reshape(3, 3) + tensor([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> torch.cumulative_trapezoid(y) + tensor([[ 0.5, 2.], + [ 3.5, 8.], + [ 6.5, 14.]]) + + >>> # Cumulatively computes the trapezoidal rule for each column of the matrix + >>> torch.cumulative_trapezoid(y, dim=0) + tensor([[ 1.5, 2.5, 3.5], + [ 6.0, 8.0, 10.0]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with the same arbitrary spacing + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([1, 3, 6]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[2., 5.], + [2., 5.], + [2., 5.]]) + + >>> # Cumulatively computes the trapezoidal rule for each row of a 3x3 ones matrix + >>> # with different arbitrary spacing per row + >>> y = torch.ones(3, 3) + >>> x = torch.tensor([[1, 2, 3], [1, 3, 5], [1, 4, 7]]) + >>> torch.cumulative_trapezoid(y, x) + tensor([[1., 2.], + [2., 4.], + [3., 6.]]) +""", +) + +add_docstr( + torch.repeat_interleave, + r""" +repeat_interleave(input, repeats, dim=None, *, output_size=None) -> Tensor + +Repeat elements of a tensor. + +.. warning:: + + This is different from :meth:`torch.Tensor.repeat` but similar to ``numpy.repeat``. + +Args: + {input} + repeats (Tensor or int): The number of repetitions for each element. + repeats is broadcasted to fit the shape of the given axis. + dim (int, optional): The dimension along which to repeat values. + By default, use the flattened input array, and return a flat output + array. + +Keyword args: + output_size (int, optional): Total output size for the given axis + ( e.g. sum of repeats). If given, it will avoid stream synchronization + needed to calculate output shape of the tensor. + +Returns: + Tensor: Repeated tensor which has the same shape as input, except along the given axis. 
+ +Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.repeat_interleave(2) + tensor([1, 1, 2, 2, 3, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.repeat_interleave(y, 2) + tensor([1, 1, 2, 2, 3, 3, 4, 4]) + >>> torch.repeat_interleave(y, 3, dim=1) + tensor([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0) + tensor([[1, 2], + [3, 4], + [3, 4]]) + >>> torch.repeat_interleave(y, torch.tensor([1, 2]), dim=0, output_size=3) + tensor([[1, 2], + [3, 4], + [3, 4]]) + +If the `repeats` is `tensor([n1, n2, n3, ...])`, then the output will be +`tensor([0, 0, ..., 1, 1, ..., 2, 2, ..., ...])` where `0` appears `n1` times, +`1` appears `n2` times, `2` appears `n3` times, etc. + +.. function:: repeat_interleave(repeats, *) -> Tensor + :noindex: + +Repeats 0 repeats[0] times, 1 repeats[1] times, 2 repeats[2] times, etc. + +Args: + repeats (Tensor): The number of repetitions for each element. + +Returns: + Tensor: Repeated tensor of size `sum(repeats)`. + +Example:: + + >>> torch.repeat_interleave(torch.tensor([1, 2, 3])) + tensor([0, 1, 1, 2, 2, 2]) + +""".format( + **common_args + ), +) + +add_docstr( + torch.tile, + r""" +tile(input, dims) -> Tensor + +Constructs a tensor by repeating the elements of :attr:`input`. +The :attr:`dims` argument specifies the number of repetitions +in each dimension. + +If :attr:`dims` specifies fewer dimensions than :attr:`input` has, then +ones are prepended to :attr:`dims` until all dimensions are specified. +For example, if :attr:`input` has shape (8, 6, 4, 2) and :attr:`dims` +is (2, 2), then :attr:`dims` is treated as (1, 1, 2, 2). + +Analogously, if :attr:`input` has fewer dimensions than :attr:`dims` +specifies, then :attr:`input` is treated as if it were unsqueezed at +dimension zero until it has as many dimensions as :attr:`dims` specifies. +For example, if :attr:`input` has shape (4, 2) and :attr:`dims` +is (3, 3, 2, 2), then :attr:`input` is treated as if it had the +shape (1, 1, 4, 2). + +.. note:: + + This function is similar to NumPy's tile function. + +Args: + input (Tensor): the tensor whose elements to repeat. + dims (tuple): the number of repetitions per dimension. + +Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> x.tile((2,)) + tensor([1, 2, 3, 1, 2, 3]) + >>> y = torch.tensor([[1, 2], [3, 4]]) + >>> torch.tile(y, (2, 2)) + tensor([[1, 2, 1, 2], + [3, 4, 3, 4], + [1, 2, 1, 2], + [3, 4, 3, 4]]) +""", +) + +add_docstr( + torch.quantize_per_tensor, + r""" +quantize_per_tensor(input, scale, zero_point, dtype) -> Tensor + +Converts a float tensor to a quantized tensor with given scale and zero point. + +Arguments: + input (Tensor): float tensor or list of tensors to quantize + scale (float or Tensor): scale to apply in quantization formula + zero_point (int or Tensor): offset in integer value that maps to float zero + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + +Returns: + Tensor: A newly quantized tensor or list of quantized tensors. 
+ +Example:: + + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8).int_repr() + tensor([ 0, 10, 20, 30], dtype=torch.uint8) + >>> torch.quantize_per_tensor([torch.tensor([-1.0, 0.0]), torch.tensor([-2.0, 2.0])], + >>> torch.tensor([0.1, 0.2]), torch.tensor([10, 20]), torch.quint8) + (tensor([-1., 0.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.1, zero_point=10), + tensor([-2., 2.], size=(2,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=20)) + >>> torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.tensor(0.1), torch.tensor(10), torch.quint8) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.10, zero_point=10) +""", +) + +add_docstr( + torch.quantize_per_tensor_dynamic, + r""" +quantize_per_tensor_dynamic(input, dtype, reduce_range) -> Tensor + +Converts a float tensor to a quantized tensor with scale and zero_point calculated +dynamically based on the input. + +Arguments: + input (Tensor): float tensor or list of tensors to quantize + dtype (:class:`torch.dtype`): the desired data type of returned tensor. + Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8`` + reduce_range (bool): a flag to indicate whether to reduce the range of quantized + data by 1 bit, it's required to avoid instruction overflow for some hardwares + +Returns: + Tensor: A newly (dynamically) quantized tensor + +Example:: + + >>> t = torch.quantize_per_tensor_dynamic(torch.tensor([-1.0, 0.0, 1.0, 2.0]), torch.quint8, False) + >>> print(t) + tensor([-1., 0., 1., 2.], size=(4,), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.011764705882352941, + zero_point=85) + >>> t.int_repr() + tensor([ 0, 85, 170, 255], dtype=torch.uint8) +""", +) + +add_docstr( + torch.quantize_per_channel, + r""" +quantize_per_channel(input, scales, zero_points, axis, dtype) -> Tensor + +Converts a float tensor to a per-channel quantized tensor with given scales and zero points. + +Arguments: + input (Tensor): float tensor to quantize + scales (Tensor): float 1D tensor of scales to use, size should match ``input.size(axis)`` + zero_points (int): integer 1D tensor of offset to use, size should match ``input.size(axis)`` + axis (int): dimension on which apply per-channel quantization + dtype (:class:`torch.dtype`): the desired data type of returned tensor. 
+ Has to be one of the quantized dtypes: ``torch.quint8``, ``torch.qint8``, ``torch.qint32`` + +Returns: + Tensor: A newly quantized tensor + +Example:: + + >>> x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]]) + >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8) + tensor([[-1., 0.], + [ 1., 2.]], size=(2, 2), dtype=torch.quint8, + quantization_scheme=torch.per_channel_affine, + scale=tensor([0.1000, 0.0100], dtype=torch.float64), + zero_point=tensor([10, 0]), axis=0) + >>> torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8).int_repr() + tensor([[ 0, 10], + [100, 200]], dtype=torch.uint8) +""", +) + + +add_docstr( + torch.quantized_batch_norm, + r""" +quantized_batch_norm(input, weight=None, bias=None, mean, var, eps, output_scale, output_zero_point) -> Tensor + +Applies batch normalization on a 4D (NCHW) quantized tensor. + +.. math:: + + y = \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta + +Arguments: + input (Tensor): quantized tensor + weight (Tensor): float tensor that corresponds to the gamma, size C + bias (Tensor): float tensor that corresponds to the beta, size C + mean (Tensor): float mean value in batch normalization, size C + var (Tensor): float tensor for variance, size C + eps (float): a value added to the denominator for numerical stability. + output_scale (float): output quantized tensor scale + output_zero_point (int): output quantized tensor zero_point + +Returns: + Tensor: A quantized tensor with batch normalization applied. + +Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_batch_norm(qx, torch.ones(2), torch.zeros(2), torch.rand(2), torch.rand(2), 0.00001, 0.2, 2) + tensor([[[[-0.2000, -0.2000], + [ 1.6000, -0.2000]], + + [[-0.4000, -0.4000], + [-0.4000, 0.6000]]], + + + [[[-0.2000, -0.2000], + [-0.2000, -0.2000]], + + [[ 0.6000, -0.4000], + [ 0.6000, -0.4000]]]], size=(2, 2, 2, 2), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=0.2, zero_point=2) +""", +) + + +add_docstr( + torch.quantized_max_pool1d, + r""" +quantized_max_pool1d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor + +Applies a 1D max pooling over an input quantized tensor composed of several input planes. + +Arguments: + input (Tensor): quantized tensor + kernel_size (list of int): the size of the sliding window + stride (``list of int``, optional): the stride of the sliding window + padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 + dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 + ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. + Defaults to False. + + +Returns: + Tensor: A quantized tensor with max_pool1d applied. + +Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_max_pool1d(qx, [2]) + tensor([[0.0000], + [1.5000]], size=(2, 1), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) +""", +) + + +add_docstr( + torch.quantized_max_pool2d, + r""" +quantized_max_pool2d(input, kernel_size, stride=[], padding=0, dilation=1, ceil_mode=False) -> Tensor + +Applies a 2D max pooling over an input quantized tensor composed of several input planes. 
+ +Arguments: + input (Tensor): quantized tensor + kernel_size (``list of int``): the size of the sliding window + stride (``list of int``, optional): the stride of the sliding window + padding (``list of int``, optional): padding to be added on both sides, must be >= 0 and <= kernel_size / 2 + dilation (``list of int``, optional): The stride between elements within a sliding window, must be > 0. Default 1 + ceil_mode (bool, optional): If True, will use ceil instead of floor to compute the output shape. + Defaults to False. + + +Returns: + Tensor: A quantized tensor with max_pool2d applied. + +Example:: + + >>> qx = torch.quantize_per_tensor(torch.rand(2, 2, 2, 2), 1.5, 3, torch.quint8) + >>> torch.quantized_max_pool2d(qx, [2,2]) + tensor([[[[1.5000]], + + [[1.5000]]], + + + [[[0.0000]], + + [[0.0000]]]], size=(2, 2, 1, 1), dtype=torch.quint8, + quantization_scheme=torch.per_tensor_affine, scale=1.5, zero_point=3) +""", +) + + +add_docstr( + torch.Generator, + r""" +Generator(device='cpu') -> Generator + +Creates and returns a generator object that manages the state of the algorithm which +produces pseudo random numbers. Used as a keyword argument in many :ref:`inplace-random-sampling` +functions. + +Arguments: + device (:class:`torch.device`, optional): the desired device for the generator. + +Returns: + Generator: An torch.Generator object. + +Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> g_cpu = torch.Generator() + >>> g_cuda = torch.Generator(device='cuda') +""", +) + + +add_docstr( + torch.Generator.set_state, + r""" +Generator.set_state(new_state) -> void + +Sets the Generator state. + +Arguments: + new_state (torch.ByteTensor): The desired state. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu_other = torch.Generator() + >>> g_cpu.set_state(g_cpu_other.get_state()) +""", +) + + +add_docstr( + torch.Generator.get_state, + r""" +Generator.get_state() -> Tensor + +Returns the Generator state as a ``torch.ByteTensor``. + +Returns: + Tensor: A ``torch.ByteTensor`` which contains all the necessary bits + to restore a Generator to a specific point in time. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.get_state() +""", +) + + +add_docstr( + torch.Generator.manual_seed, + r""" +Generator.manual_seed(seed) -> Generator + +Sets the seed for generating random numbers. Returns a `torch.Generator` object. Any 32-bit integer is a valid seed. + +Arguments: + seed (int): The desired seed. Value must be within the inclusive range + `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError + is raised. Negative inputs are remapped to positive values with the formula + `0xffff_ffff_ffff_ffff + seed`. + +Returns: + Generator: An torch.Generator object. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.manual_seed(2147483647) +""", +) + + +add_docstr( + torch.Generator.initial_seed, + r""" +Generator.initial_seed() -> int + +Returns the initial seed for generating random numbers. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.initial_seed() + 2147483647 +""", +) + + +add_docstr( + torch.Generator.seed, + r""" +Generator.seed() -> int + +Gets a non-deterministic random number from std::random_device or the current +time and uses it to seed a Generator. + +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.seed() + 1516516984916 +""", +) + + +add_docstr( + torch.Generator.device, + r""" +Generator.device -> device + +Gets the current device of the generator. 
+ +Example:: + + >>> g_cpu = torch.Generator() + >>> g_cpu.device + device(type='cpu') +""", +) + +add_docstr( + torch._assert_async, + r""" +_assert_async(tensor) -> void + +Asynchronously assert that the contents of tensor are nonzero. For CPU tensors, +this is equivalent to ``assert tensor`` or ``assert tensor.is_nonzero()``; for +CUDA tensors, we DO NOT synchronize and you may only find out the assertion +failed at a later CUDA kernel launch. Asynchronous assertion can be helpful for +testing invariants in CUDA tensors without giving up performance. This function +is NOT intended to be used for regular error checking, as it will trash your CUDA +context if the assert fails (forcing you to restart your PyTorch process.) + +Args: + tensor (Tensor): a one element tensor to test to see if it is nonzero. Zero + elements (including False for boolean tensors) cause an assertion failure + to be raised. +""", +) + +add_docstr( + torch.searchsorted, + r""" +searchsorted(sorted_sequence, values, *, out_int32=False, right=False, side=None, out=None, sorter=None) -> Tensor + +Find the indices from the *innermost* dimension of :attr:`sorted_sequence` such that, if the +corresponding values in :attr:`values` were inserted before the indices, when sorted, the order +of the corresponding *innermost* dimension within :attr:`sorted_sequence` would be preserved. +Return a new tensor with the same size as :attr:`values`. More formally, +the returned index satisfies the following rules: + +.. list-table:: + :widths: 12 10 78 + :header-rows: 1 + + * - :attr:`sorted_sequence` + - :attr:`right` + - *returned index satisfies* + * - 1-D + - False + - ``sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]`` + * - 1-D + - True + - ``sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]`` + * - N-D + - False + - ``sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] <= sorted_sequence[m][n]...[l][i]`` + * - N-D + - True + - ``sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] < sorted_sequence[m][n]...[l][i]`` + +Args: + sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the *innermost* + dimension unless :attr:`sorter` is provided, in which case the sequence does not + need to be sorted + values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + +Keyword args: + out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of *innermost* dimension within :attr:`sorted_sequence` + (one pass the last index of the *innermost* dimension). In other words, if False, + gets the lower bound index for each value in :attr:`values` on the corresponding + *innermost* dimension of the :attr:`sorted_sequence`. If True, gets the upper + bound index instead. Default value is False. :attr:`side` does the same and is + preferred. It will error if :attr:`side` is set to "left" while this is True. + side (str, optional): the same as :attr:`right` but preferred. "left" corresponds to False for :attr:`right` + and "right" corresponds to True for :attr:`right`. It will error if this is set to + "left" while :attr:`right` is True. Default value is None. 
+ out (Tensor, optional): the output tensor, must be the same size as :attr:`values` if provided. + sorter (LongTensor, optional): if provided, a tensor matching the shape of the unsorted + :attr:`sorted_sequence` containing a sequence of indices that sort it in the + ascending order on the innermost dimension + + +Example:: + + >>> sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]]) + >>> sorted_sequence + tensor([[ 1, 3, 5, 7, 9], + [ 2, 4, 6, 8, 10]]) + >>> values = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> values + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.searchsorted(sorted_sequence, values) + tensor([[1, 3, 4], + [1, 2, 4]]) + >>> torch.searchsorted(sorted_sequence, values, side='right') + tensor([[2, 3, 5], + [1, 3, 4]]) + + >>> sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9]) + >>> sorted_sequence_1d + tensor([1, 3, 5, 7, 9]) + >>> torch.searchsorted(sorted_sequence_1d, values) + tensor([[1, 3, 4], + [1, 3, 4]]) +""", +) + +add_docstr( + torch.bucketize, + r""" +bucketize(input, boundaries, *, out_int32=False, right=False, out=None) -> Tensor + +Returns the indices of the buckets to which each value in the :attr:`input` belongs, where the +boundaries of the buckets are set by :attr:`boundaries`. Return a new tensor with the same size +as :attr:`input`. If :attr:`right` is False (default), then the left boundary is open. Note that +this behavior is opposite the behavior of +`numpy.digitize `_. +More formally, the returned index satisfies the following rules: + +.. list-table:: + :widths: 15 85 + :header-rows: 1 + + * - :attr:`right` + - *returned index satisfies* + * - False + - ``boundaries[i-1] < input[m][n]...[l][x] <= boundaries[i]`` + * - True + - ``boundaries[i-1] <= input[m][n]...[l][x] < boundaries[i]`` + +Args: + input (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s). + boundaries (Tensor): 1-D tensor, must contain a strictly increasing sequence, or the return value is undefined. + +Keyword args: + out_int32 (bool, optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise. + Default value is False, i.e. default output data type is torch.int64. + right (bool, optional): if False, return the first suitable location that is found. If True, return the + last such index. If no suitable index found, return 0 for non-numerical value + (eg. nan, inf) or the size of :attr:`boundaries` (one pass the last index). + In other words, if False, gets the lower bound index for each value in :attr:`input` + from :attr:`boundaries`. If True, gets the upper bound index instead. + Default value is False. + out (Tensor, optional): the output tensor, must be the same size as :attr:`input` if provided. + + +Example:: + + >>> boundaries = torch.tensor([1, 3, 5, 7, 9]) + >>> boundaries + tensor([1, 3, 5, 7, 9]) + >>> v = torch.tensor([[3, 6, 9], [3, 6, 9]]) + >>> v + tensor([[3, 6, 9], + [3, 6, 9]]) + >>> torch.bucketize(v, boundaries) + tensor([[1, 3, 4], + [1, 3, 4]]) + >>> torch.bucketize(v, boundaries, right=True) + tensor([[2, 3, 5], + [2, 3, 5]]) +""", +) + +add_docstr( + torch.view_as_real_copy, + r""" +Performs the same operation as :func:`torch.view_as_real`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.view_as_complex_copy, + r""" +Performs the same operation as :func:`torch.view_as_complex`, but all output tensors +are freshly created instead of aliasing the input. 
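+
+A minimal illustrative sketch (not taken from the upstream documentation; the
+printed values assume a single float pair interpreted as one complex number):
+
+Example::
+
+    >>> t = torch.tensor([[1.0, 2.0]])
+    >>> c = torch.view_as_complex_copy(t)   # freshly allocated, not a view
+    >>> c
+    tensor([1.+2.j])
+    >>> t[0, 0] = 9.0                       # mutating the input ...
+    >>> c                                   # ... leaves the copy untouched
+    tensor([1.+2.j])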
+""", +) + +add_docstr( + torch.as_strided_copy, + r""" +Performs the same operation as :func:`torch.as_strided`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.diagonal_copy, + r""" +Performs the same operation as :func:`torch.diagonal`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.expand_copy, + r""" +Performs the same operation as :func:`torch.expand`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.permute_copy, + r""" +Performs the same operation as :func:`torch.permute`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.select_copy, + r""" +Performs the same operation as :func:`torch.select`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.detach_copy, + r""" +Performs the same operation as :func:`torch.detach`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.slice_copy, + r""" +Performs the same operation as :func:`torch.slice`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.split_copy, + r""" +Performs the same operation as :func:`torch.split`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.split_with_sizes_copy, + r""" +Performs the same operation as :func:`torch.split_with_sizes`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.squeeze_copy, + r""" +Performs the same operation as :func:`torch.squeeze`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.t_copy, + r""" +Performs the same operation as :func:`torch.t`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.transpose_copy, + r""" +Performs the same operation as :func:`torch.transpose`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.unsqueeze_copy, + r""" +Performs the same operation as :func:`torch.unsqueeze`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.indices_copy, + r""" +Performs the same operation as :func:`torch.indices`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.values_copy, + r""" +Performs the same operation as :func:`torch.values`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.crow_indices_copy, + r""" +Performs the same operation as :func:`torch.crow_indices`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.col_indices_copy, + r""" +Performs the same operation as :func:`torch.col_indices`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.unbind_copy, + r""" +Performs the same operation as :func:`torch.unbind`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.view_copy, + r""" +Performs the same operation as :func:`torch.view`, but all output tensors +are freshly created instead of aliasing the input. 
+""", +) + +add_docstr( + torch.unfold_copy, + r""" +Performs the same operation as :func:`torch.unfold`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +add_docstr( + torch.alias_copy, + r""" +Performs the same operation as :func:`torch.alias`, but all output tensors +are freshly created instead of aliasing the input. +""", +) + +for unary_base_func_name in ( + "exp", + "sqrt", + "abs", + "acos", + "asin", + "atan", + "ceil", + "cos", + "cosh", + "erf", + "erfc", + "expm1", + "floor", + "log", + "log10", + "log1p", + "log2", + "neg", + "tan", + "tanh", + "sin", + "sinh", + "round", + "lgamma", + "frac", + "reciprocal", + "sigmoid", + "trunc", + "zero", +): + unary_foreach_func_name = f"_foreach_{unary_base_func_name}" + if hasattr(torch, unary_foreach_func_name): + add_docstr( + getattr(torch, unary_foreach_func_name), + rf""" +{unary_foreach_func_name}(self: List[Tensor]) -> List[Tensor] + +Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list. + """, + ) + unary_inplace_foreach_func_name = f"{unary_foreach_func_name}_" + if hasattr(torch, unary_inplace_foreach_func_name): + add_docstr( + getattr(torch, unary_inplace_foreach_func_name), + rf""" +{unary_inplace_foreach_func_name}(self: List[Tensor]) -> None + +Apply :func:`torch.{unary_base_func_name}` to each Tensor of the input list. + """, + ) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/functional.py b/llmeval-env/lib/python3.10/site-packages/torch/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..7c07ae348631b50612823c5d913d075d3aa23fe7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/functional.py @@ -0,0 +1,1983 @@ +from typing import ( + List, Tuple, Optional, Union, Any, Sequence, TYPE_CHECKING +) +import operator +import itertools + +import torch +from torch._C import _add_docstr +import torch.nn.functional as F +from ._lowrank import svd_lowrank, pca_lowrank +from .overrides import ( + has_torch_function, has_torch_function_unary, has_torch_function_variadic, + handle_torch_function) +from ._jit_internal import boolean_dispatch +from ._jit_internal import _overload as overload + +Tensor = torch.Tensor +from torch import _VF + +__all__ = [ + 'atleast_1d', + 'atleast_2d', + 'atleast_3d', + 'align_tensors', + 'broadcast_shapes', + 'broadcast_tensors', + 'cartesian_prod', + 'block_diag', + 'cdist', + 'chain_matmul', + 'einsum', + 'istft', + 'lu', + 'norm', + 'meshgrid', + 'pca_lowrank', + 'split', + 'stft', + 'svd_lowrank', + 'tensordot', + 'unique', + 'unique_consecutive', + 'unravel_index', +] + + +def broadcast_tensors(*tensors): + r"""broadcast_tensors(*tensors) -> List of Tensors + + Broadcasts the given tensors according to :ref:`broadcasting-semantics`. + + Args: + *tensors: any number of tensors of the same type + + .. warning:: + + More than one element of a broadcasted tensor may refer to a single + memory location. As a result, in-place operations (especially ones that + are vectorized) may result in incorrect behavior. If you need to write + to the tensors, please clone them first. + + Example:: + + >>> x = torch.arange(3).view(1, 3) + >>> y = torch.arange(2).view(2, 1) + >>> a, b = torch.broadcast_tensors(x, y) + >>> a.size() + torch.Size([2, 3]) + >>> a + tensor([[0, 1, 2], + [0, 1, 2]]) + """ + # This wrapper exists to support variadic args. 
+ if has_torch_function(tensors): + return handle_torch_function(broadcast_tensors, tensors, *tensors) + return _VF.broadcast_tensors(tensors) # type: ignore[attr-defined] + + +def broadcast_shapes(*shapes): + r"""broadcast_shapes(*shapes) -> Size + + Similar to :func:`broadcast_tensors` but for shapes. + + This is equivalent to + ``torch.broadcast_tensors(*map(torch.empty, shapes))[0].shape`` + but avoids the need create to intermediate tensors. This is useful for + broadcasting tensors of common batch shape but different rightmost shape, + e.g. to broadcast mean vectors with covariance matrices. + + Example:: + + >>> torch.broadcast_shapes((2,), (3, 1), (1, 1, 1)) + torch.Size([1, 3, 2]) + + Args: + \*shapes (torch.Size): Shapes of tensors. + + Returns: + shape (torch.Size): A shape compatible with all input shapes. + + Raises: + RuntimeError: If shapes are incompatible. + """ + # This wrapper exists to support variadic args. + # TODO Move this to C++ once the jit has better support for torch.Size. + if not torch.jit.is_tracing(): + max_len = 0 + for shape in shapes: + if isinstance(shape, (int, torch.SymInt)): + if max_len < 1: + max_len = 1 + elif isinstance(shape, (tuple, list)): + s = len(shape) + if max_len < s: + max_len = s + result = [1] * max_len + + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + for shape in shapes: + if isinstance(shape, (int, torch.SymInt)): + shape = (shape,) + if isinstance(shape, (tuple, list)): + for i in range(-1, -1 - len(shape), -1): + if shape[i] < 0: + raise RuntimeError(f"Trying to create tensor with negative dimension ({shape[i]}): ({shape[i]})") + # NB: result is initialized to 1 so this is effectively an + # equals one test + if guard_size_oblivious(shape[i] == 1) or guard_size_oblivious(shape[i] == result[i]): + continue + if result[i] != 1: + raise RuntimeError("Shape mismatch: objects cannot be broadcast to a single shape") + result[i] = shape[i] + else: + raise RuntimeError("Input shapes should be of type ints, a tuple of ints, or a list of ints, got ", shape) + return torch.Size(result) + else: + # with implementation above, torch.jit.trace hardcodes the sizes which makes subsequent replays fail + with torch.no_grad(): + scalar = torch.zeros((), device="cpu") + tensors = [scalar.expand(shape) for shape in shapes] + tensors = broadcast_tensors(*tensors) + return tensors[0].shape + + +def split( + tensor: Tensor, split_size_or_sections: Union[int, List[int]], dim: int = 0 +) -> Tuple[Tensor, ...]: + r"""Splits the tensor into chunks. Each chunk is a view of the original tensor. + + If :attr:`split_size_or_sections` is an integer type, then :attr:`tensor` will + be split into equally sized chunks (if possible). Last chunk will be smaller if + the tensor size along the given dimension :attr:`dim` is not divisible by + :attr:`split_size`. + + If :attr:`split_size_or_sections` is a list, then :attr:`tensor` will be split + into ``len(split_size_or_sections)`` chunks with sizes in :attr:`dim` according + to :attr:`split_size_or_sections`. + + Args: + tensor (Tensor): tensor to split. + split_size_or_sections (int) or (list(int)): size of a single chunk or + list of sizes for each chunk + dim (int): dimension along which to split the tensor. 
+ + Example:: + + >>> a = torch.arange(10).reshape(5, 2) + >>> a + tensor([[0, 1], + [2, 3], + [4, 5], + [6, 7], + [8, 9]]) + >>> torch.split(a, 2) + (tensor([[0, 1], + [2, 3]]), + tensor([[4, 5], + [6, 7]]), + tensor([[8, 9]])) + >>> torch.split(a, [1, 4]) + (tensor([[0, 1]]), + tensor([[2, 3], + [4, 5], + [6, 7], + [8, 9]])) + """ + if has_torch_function_unary(tensor): + return handle_torch_function( + split, (tensor,), tensor, split_size_or_sections, dim=dim) + # Overwriting reason: + # This dispatches to two ATen functions depending on the type of + # split_size_or_sections. The branching code is in _tensor.py, which we + # call here. + return tensor.split(split_size_or_sections, dim) + + +def einsum(*args: Any) -> Tensor: + r"""einsum(equation, *operands) -> Tensor + + Sums the product of the elements of the input :attr:`operands` along dimensions specified using a notation + based on the Einstein summation convention. + + Einsum allows computing many common multi-dimensional linear algebraic array operations by representing them + in a short-hand format based on the Einstein summation convention, given by :attr:`equation`. The details of + this format are described below, but the general idea is to label every dimension of the input :attr:`operands` + with some subscript and define which subscripts are part of the output. The output is then computed by summing + the product of the elements of the :attr:`operands` along the dimensions whose subscripts are not part of the + output. For example, matrix multiplication can be computed using einsum as `torch.einsum("ij,jk->ik", A, B)`. + Here, j is the summation subscript and i and k the output subscripts (see section below for more details on why). + + Equation: + + The :attr:`equation` string specifies the subscripts (letters in `[a-zA-Z]`) for each dimension of + the input :attr:`operands` in the same order as the dimensions, separating subscripts for each operand by a + comma (','), e.g. `'ij,jk'` specify subscripts for two 2D operands. The dimensions labeled with the same subscript + must be broadcastable, that is, their size must either match or be `1`. The exception is if a subscript is + repeated for the same input operand, in which case the dimensions labeled with this subscript for this operand + must match in size and the operand will be replaced by its diagonal along these dimensions. The subscripts that + appear exactly once in the :attr:`equation` will be part of the output, sorted in increasing alphabetical order. + The output is computed by multiplying the input :attr:`operands` element-wise, with their dimensions aligned based + on the subscripts, and then summing out the dimensions whose subscripts are not part of the output. + + Optionally, the output subscripts can be explicitly defined by adding an arrow ('->') at the end of the equation + followed by the subscripts for the output. For instance, the following equation computes the transpose of a + matrix multiplication: 'ij,jk->ki'. The output subscripts must appear at least once for some input operand and + at most once for the output. + + Ellipsis ('...') can be used in place of subscripts to broadcast the dimensions covered by the ellipsis. + Each input operand may contain at most one ellipsis which will cover the dimensions not covered by subscripts, + e.g. for an input operand with 5 dimensions, the ellipsis in the equation `'ab...c'` cover the third and fourth + dimensions. 
The ellipsis does not need to cover the same number of dimensions across the :attr:`operands` but the + 'shape' of the ellipsis (the size of the dimensions covered by them) must broadcast together. If the output is not + explicitly defined with the arrow ('->') notation, the ellipsis will come first in the output (left-most dimensions), + before the subscript labels that appear exactly once for the input operands. e.g. the following equation implements + batch matrix multiplication `'...ij,...jk'`. + + A few final notes: the equation may contain whitespaces between the different elements (subscripts, ellipsis, + arrow and comma) but something like `'. . .'` is not valid. An empty string `''` is valid for scalar operands. + + .. note:: + + ``torch.einsum`` handles ellipsis ('...') differently from NumPy in that it allows dimensions + covered by the ellipsis to be summed over, that is, ellipsis are not required to be part of the output. + + .. note:: + + This function uses opt_einsum (https://optimized-einsum.readthedocs.io/en/stable/) to speed up computation or to + consume less memory by optimizing contraction order. This optimization occurs when there are at least three + inputs, since the order does not matter otherwise. Note that finding _the_ optimal path is an NP-hard problem, + thus, opt_einsum relies on different heuristics to achieve near-optimal results. If opt_einsum is not available, + the default order is to contract from left to right. + + To bypass this default behavior, add the following line to disable the usage of opt_einsum and skip path + calculation: `torch.backends.opt_einsum.enabled = False` + + To specify which strategy you'd like for opt_einsum to compute the contraction path, add the following line: + `torch.backends.opt_einsum.strategy = 'auto'`. The default strategy is 'auto', and we also support 'greedy' and + 'optimal'. Disclaimer that the runtime of 'optimal' is factorial in the number of inputs! See more details in + the opt_einsum documentation (https://optimized-einsum.readthedocs.io/en/stable/path_finding.html). + + .. note:: + + As of PyTorch 1.10 :func:`torch.einsum` also supports the sublist format (see examples below). In this format, + subscripts for each operand are specified by sublists, list of integers in the range [0, 52). These sublists + follow their operands, and an extra sublist can appear at the end of the input to specify the output's + subscripts., e.g. `torch.einsum(op1, sublist1, op2, sublist2, ..., [subslist_out])`. Python's `Ellipsis` object + may be provided in a sublist to enable broadcasting as described in the Equation section above. + + Args: + equation (str): The subscripts for the Einstein summation. + operands (List[Tensor]): The tensors to compute the Einstein summation of. 
+ + Examples:: + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # trace + >>> torch.einsum('ii', torch.randn(4, 4)) + tensor(-1.2104) + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # diagonal + >>> torch.einsum('ii->i', torch.randn(4, 4)) + tensor([-0.1034, 0.7952, -0.2433, 0.4545]) + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # outer product + >>> x = torch.randn(5) + >>> y = torch.randn(4) + >>> torch.einsum('i,j->ij', x, y) + tensor([[ 0.1156, -0.2897, -0.3918, 0.4963], + [-0.3744, 0.9381, 1.2685, -1.6070], + [ 0.7208, -1.8058, -2.4419, 3.0936], + [ 0.1713, -0.4291, -0.5802, 0.7350], + [ 0.5704, -1.4290, -1.9323, 2.4480]]) + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # batch matrix multiplication + >>> As = torch.randn(3, 2, 5) + >>> Bs = torch.randn(3, 5, 4) + >>> torch.einsum('bij,bjk->bik', As, Bs) + tensor([[[-1.0564, -1.5904, 3.2023, 3.1271], + [-1.6706, -0.8097, -0.8025, -2.1183]], + + [[ 4.2239, 0.3107, -0.5756, -0.2354], + [-1.4558, -0.3460, 1.5087, -0.8530]], + + [[ 2.8153, 1.8787, -4.3839, -1.2112], + [ 0.3728, -2.1131, 0.0921, 0.8305]]]) + + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> # with sublist format and ellipsis + >>> torch.einsum(As, [..., 0, 1], Bs, [..., 1, 2], [..., 0, 2]) + tensor([[[-1.0564, -1.5904, 3.2023, 3.1271], + [-1.6706, -0.8097, -0.8025, -2.1183]], + + [[ 4.2239, 0.3107, -0.5756, -0.2354], + [-1.4558, -0.3460, 1.5087, -0.8530]], + + [[ 2.8153, 1.8787, -4.3839, -1.2112], + [ 0.3728, -2.1131, 0.0921, 0.8305]]]) + + >>> # batch permute + >>> A = torch.randn(2, 3, 4, 5) + >>> torch.einsum('...ij->...ji', A).shape + torch.Size([2, 3, 5, 4]) + + >>> # equivalent to torch.nn.functional.bilinear + >>> A = torch.randn(3, 5, 4) + >>> l = torch.randn(2, 5) + >>> r = torch.randn(2, 4) + >>> torch.einsum('bn,anm,bm->ba', l, A, r) + tensor([[-0.3430, -5.2405, 0.4494], + [ 0.3311, 5.5201, -3.0356]]) + """ + import torch.backends.opt_einsum as opt_einsum + # This wrapper exists to support variadic args. + if len(args) < 2: + raise ValueError('einsum(): must specify the equation string and at least one operand, ' + 'or at least one operand and its subscripts list') + + equation = None + operands = None + + if isinstance(args[0], torch.Tensor): + # Convert the subscript list format which is an interleaving of operand and its subscripts + # list with an optional output subscripts list at the end (see documentation for more details on this) + # to the equation string format by creating the equation string from the subscripts list and grouping the + # input operands into a tensorlist (List[Tensor]). + def parse_subscript(n: int) -> str: + if n == Ellipsis: + return '...' 
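+            # Integer subscripts live in [0, 52): 0-25 map to 'A'-'Z' and
+            # 26-51 map to 'a'-'z', mirroring the letters allowed in equations.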
+ if n >= 0 and n < 26: + return chr(ord('A') + n) + if n >= 26 and n < 52: + return chr(ord('a') + n - 26) + raise ValueError('einsum(): subscript in subscript list is not within the valid range [0, 52)') + + # Parse subscripts for input operands + equation = ','.join(''.join(parse_subscript(s) for s in l) for l in args[1::2]) + + # Parse optional output subscripts (provided when the number of arguments is odd) + if len(args) % 2 == 1: + equation += '->' + ''.join(parse_subscript(s) for s in args[-1]) + operands = args[:-1:2] + else: + operands = args[::2] + else: + equation = args[0] + operands = args[1:] + + if has_torch_function(operands): + return handle_torch_function(einsum, operands, equation, *operands) + + if len(operands) == 1 and isinstance(operands[0], (list, tuple)): + # the old interface of passing the operands as one list argument + _operands = operands[0] + # recurse incase operands contains value that has torch function + # in the original implementation this line is omitted + return einsum(equation, *_operands) + + if len(operands) <= 2 or not opt_einsum.enabled: + # the path for contracting 0 or 1 time(s) is already optimized + # or the user has disabled using opt_einsum + return _VF.einsum(equation, operands) # type: ignore[attr-defined] + + path = None + if opt_einsum.is_available(): + _opt_einsum = opt_einsum.get_opt_einsum() + tupled_path = _opt_einsum.contract_path(equation, *operands, optimize=opt_einsum.strategy)[0] + # flatten path for dispatching to C++ + path = [item for pair in tupled_path for item in pair] + return _VF.einsum(equation, operands, path=path) # type: ignore[attr-defined] + + +# This wrapper exists to support variadic args. +if TYPE_CHECKING: + # The JIT doesn't understand Union, so only add type annotation for mypy + def meshgrid(*tensors: Union[Tensor, List[Tensor]], + indexing: Optional[str] = None) -> Tuple[Tensor, ...]: + return _meshgrid(*tensors, indexing=indexing) +else: + def meshgrid(*tensors, indexing: Optional[str] = None) -> Tuple[Tensor, ...]: + r"""Creates grids of coordinates specified by the 1D inputs in `attr`:tensors. + + This is helpful when you want to visualize data over some + range of inputs. See below for a plotting example. + + Given :math:`N` 1D tensors :math:`T_0 \ldots T_{N-1}` as + inputs with corresponding sizes :math:`S_0 \ldots S_{N-1}`, + this creates :math:`N` N-dimensional tensors :math:`G_0 \ldots + G_{N-1}`, each with shape :math:`(S_0, ..., S_{N-1})` where + the output :math:`G_i` is constructed by expanding :math:`T_i` + to the result shape. + + .. note:: + 0D inputs are treated equivalently to 1D inputs of a + single element. + + .. warning:: + `torch.meshgrid(*tensors)` currently has the same behavior + as calling `numpy.meshgrid(*arrays, indexing='ij')`. + + In the future `torch.meshgrid` will transition to + `indexing='xy'` as the default. + + https://github.com/pytorch/pytorch/issues/50276 tracks + this issue with the goal of migrating to NumPy's behavior. + + .. seealso:: + + :func:`torch.cartesian_prod` has the same effect but it + collects the data in a tensor of vectors. + + Args: + tensors (list of Tensor): list of scalars or 1 dimensional tensors. Scalars will be + treated as tensors of size :math:`(1,)` automatically + + indexing: (str, optional): the indexing mode, either "xy" + or "ij", defaults to "ij". See warning for future changes. 
+ + If "xy" is selected, the first dimension corresponds + to the cardinality of the second input and the second + dimension corresponds to the cardinality of the first + input. + + If "ij" is selected, the dimensions are in the same + order as the cardinality of the inputs. + + Returns: + seq (sequence of Tensors): If the input has :math:`N` + tensors of size :math:`S_0 \ldots S_{N-1}``, then the + output will also have :math:`N` tensors, where each tensor + is of shape :math:`(S_0, ..., S_{N-1})`. + + Example:: + + >>> x = torch.tensor([1, 2, 3]) + >>> y = torch.tensor([4, 5, 6]) + + Observe the element-wise pairings across the grid, (1, 4), + (1, 5), ..., (3, 6). This is the same thing as the + cartesian product. + >>> grid_x, grid_y = torch.meshgrid(x, y, indexing='ij') + >>> grid_x + tensor([[1, 1, 1], + [2, 2, 2], + [3, 3, 3]]) + >>> grid_y + tensor([[4, 5, 6], + [4, 5, 6], + [4, 5, 6]]) + + This correspondence can be seen when these grids are + stacked properly. + >>> torch.equal(torch.cat(tuple(torch.dstack([grid_x, grid_y]))), + ... torch.cartesian_prod(x, y)) + True + + `torch.meshgrid` is commonly used to produce a grid for + plotting. + >>> # xdoctest: +REQUIRES(module:matplotlib) + >>> # xdoctest: +REQUIRES(env:DOCTEST_SHOW) + >>> import matplotlib.pyplot as plt + >>> xs = torch.linspace(-5, 5, steps=100) + >>> ys = torch.linspace(-5, 5, steps=100) + >>> x, y = torch.meshgrid(xs, ys, indexing='xy') + >>> z = torch.sin(torch.sqrt(x * x + y * y)) + >>> ax = plt.axes(projection='3d') + >>> ax.plot_surface(x.numpy(), y.numpy(), z.numpy()) + >>> plt.show() + + .. image:: ../_static/img/meshgrid.png + :width: 512 + + """ + return _meshgrid(*tensors, indexing=indexing) + + +def _meshgrid(*tensors, indexing: Optional[str]): + if has_torch_function(tensors): + return handle_torch_function(meshgrid, tensors, *tensors, indexing=indexing) + if len(tensors) == 1 and isinstance(tensors[0], (list, tuple)): + # the old interface of passing the operands as one list argument + tensors = tensors[0] # type: ignore[assignment] + + # Continue allowing call of old method that takes no indexing + # kwarg for forward compatibility reasons. + # + # Remove this two weeks after landing. + kwargs = {} if indexing is None else {'indexing': indexing} + return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] + + +def stft(input: Tensor, n_fft: int, hop_length: Optional[int] = None, + win_length: Optional[int] = None, window: Optional[Tensor] = None, + center: bool = True, pad_mode: str = 'reflect', normalized: bool = False, + onesided: Optional[bool] = None, + return_complex: Optional[bool] = None) -> Tensor: + r"""Short-time Fourier transform (STFT). + + .. warning:: + From version 1.8.0, :attr:`return_complex` must always be given + explicitly for real inputs and `return_complex=False` has been + deprecated. Strongly prefer `return_complex=True` as in a future + pytorch release, this function will only return complex tensors. + + Note that :func:`torch.view_as_real` can be used to recover a real + tensor with an extra last dimension for real and imaginary components. + + .. warning:: + From version 2.1, a warning will be provided if a :attr:`window` is + not specified. In a future release, this attribute will be required. + Not providing a window currently defaults to using a rectangular window, + which may result in undesirable artifacts. Consider using tapered windows, + such as :func:`torch.hann_window`. + + The STFT computes the Fourier transform of short overlapping windows of the + input. 
This gives the frequency components of the signal as they change over
+    time. The interface of this function is modeled after (but *not* a drop-in
+    replacement for) the librosa_ stft function.
+
+    .. _librosa: https://librosa.org/doc/latest/generated/librosa.stft.html
+
+    Ignoring the optional batch dimension, this method computes the following
+    expression:
+
+    .. math::
+        X[\omega, m] = \sum_{k = 0}^{\text{win\_length-1}}%
+                            \text{window}[k]\ \text{input}[m \times \text{hop\_length} + k]\ %
+                            \exp\left(- j \frac{2 \pi \cdot \omega k}{\text{n\_fft}}\right),
+
+    where :math:`m` is the index of the sliding window, and :math:`\omega` is
+    the frequency :math:`0 \leq \omega < \text{n\_fft}` for ``onesided=False``,
+    or :math:`0 \leq \omega < \lfloor \text{n\_fft} / 2 \rfloor + 1` for ``onesided=True``.
+
+    * :attr:`input` must be either a 1-D time sequence or a 2-D batch of time
+      sequences.
+
+    * If :attr:`hop_length` is ``None`` (default), it is treated as equal to
+      ``floor(n_fft / 4)``.
+
+    * If :attr:`win_length` is ``None`` (default), it is treated as equal to
+      :attr:`n_fft`.
+
+    * :attr:`window` can be a 1-D tensor of size :attr:`win_length`, e.g., from
+      :meth:`torch.hann_window`. If :attr:`window` is ``None`` (default), it is
+      treated as if having :math:`1` everywhere in the window. If
+      :math:`\text{win\_length} < \text{n\_fft}`, :attr:`window` will be padded on
+      both sides to length :attr:`n_fft` before being applied.
+
+    * If :attr:`center` is ``True`` (default), :attr:`input` will be padded on
+      both sides so that the :math:`t`-th frame is centered at time
+      :math:`t \times \text{hop\_length}`. Otherwise, the :math:`t`-th frame
+      begins at time :math:`t \times \text{hop\_length}`.
+
+    * :attr:`pad_mode` determines the padding method used on :attr:`input` when
+      :attr:`center` is ``True``. See :meth:`torch.nn.functional.pad` for
+      all available options. Default is ``"reflect"``.
+
+    * If :attr:`onesided` is ``True`` (default for real input), only values for
+      :math:`\omega` in :math:`\left[0, 1, 2, \dots, \left\lfloor
+      \frac{\text{n\_fft}}{2} \right\rfloor + 1\right]` are returned because
+      the real-to-complex Fourier transform satisfies the conjugate symmetry,
+      i.e., :math:`X[m, \omega] = X[m, \text{n\_fft} - \omega]^*`.
+      Note if the input or window tensors are complex, then :attr:`onesided`
+      output is not possible.
+
+    * If :attr:`normalized` is ``True`` (default is ``False``), the function
+      returns the normalized STFT results, i.e., multiplied by :math:`(\text{frame\_length})^{-0.5}`.
+
+    * If :attr:`return_complex` is ``True`` (default if input is complex), the
+      return is an ``input.dim() + 1`` dimensional complex tensor. If ``False``,
+      the output is an ``input.dim() + 2`` dimensional real tensor where the last
+      dimension represents the real and imaginary components.
+
+    Returns either a complex tensor of size :math:`(* \times N \times T)` if
+    :attr:`return_complex` is true, or a real tensor of size :math:`(* \times N
+    \times T \times 2)`, where :math:`*` is the optional batch size of
+    :attr:`input`, :math:`N` is the number of frequencies where STFT is applied
+    and :math:`T` is the total number of frames used.
+
+    .. warning::
+      This function changed signature at version 0.4.1. Calling with the
+      previous signature may cause error or return incorrect result.
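+
+    A minimal usage sketch (illustrative only; the shape follows from the
+    defaults described above, i.e. ``hop_length = n_fft // 4`` and
+    ``win_length = n_fft``):
+
+    Example::
+
+        >>> signal = torch.randn(4000)
+        >>> window = torch.hann_window(400)
+        >>> spec = torch.stft(signal, n_fft=400, window=window, return_complex=True)
+        >>> spec.shape   # (n_fft // 2 + 1, 1 + L // hop_length)
+        torch.Size([201, 41])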
+ + Args: + input (Tensor): the input tensor of shape `(B?, L)` where `B?` is an optional + batch dimension + n_fft (int): size of Fourier transform + hop_length (int, optional): the distance between neighboring sliding window + frames. Default: ``None`` (treated as equal to ``floor(n_fft / 4)``) + win_length (int, optional): the size of window frame and STFT filter. + Default: ``None`` (treated as equal to :attr:`n_fft`) + window (Tensor, optional): the optional window function. + Shape must be 1d and `<= n_fft` + Default: ``None`` (treated as window of all :math:`1` s) + center (bool, optional): whether to pad :attr:`input` on both sides so + that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`. + Default: ``True`` + pad_mode (str, optional): controls the padding method used when + :attr:`center` is ``True``. Default: ``"reflect"`` + normalized (bool, optional): controls whether to return the normalized STFT results + Default: ``False`` + onesided (bool, optional): controls whether to return half of results to + avoid redundancy for real inputs. + Default: ``True`` for real :attr:`input` and :attr:`window`, ``False`` otherwise. + return_complex (bool, optional): whether to return a complex tensor, or + a real tensor with an extra last dimension for the real and + imaginary components. + + .. versionchanged:: 2.0 + ``return_complex`` is now a required argument for real inputs, + as the default is being transitioned to ``True``. + + .. deprecated:: 2.0 + ``return_complex=False`` is deprecated, instead use ``return_complex=True`` + Note that calling :func:`torch.view_as_real` on the output will + recover the deprecated output format. + + Returns: + Tensor: A tensor containing the STFT result with shape `(B?, N, T, C?)` where + - `B?` is an optional batch dimension from the input. + - `N` is the number of frequency samples, `(n_fft // 2) + 1` for + `onesided=True`, or otherwise `n_fft`. + - `T` is the number of frames, `1 + L // hop_length` + for `center=True`, or `1 + (L - n_fft) // hop_length` otherwise. + - `C?` is an optional length-2 dimension of real and imaginary + components, present when `return_complex=False`. + + """ + if has_torch_function_unary(input): + return handle_torch_function( + stft, (input,), input, n_fft, hop_length=hop_length, win_length=win_length, + window=window, center=center, pad_mode=pad_mode, normalized=normalized, + onesided=onesided, return_complex=return_complex) + # NOTE: Do not edit. This code will be removed once the forward-compatibility + # period is over for PR #73432 + if center: + signal_dim = input.dim() + extended_shape = [1] * (3 - signal_dim) + list(input.size()) + pad = int(n_fft // 2) + input = F.pad(input.view(extended_shape), [pad, pad], pad_mode) + input = input.view(input.shape[-signal_dim:]) + return _VF.stft(input, n_fft, hop_length, win_length, window, # type: ignore[attr-defined] + normalized, onesided, return_complex) + + +istft = _add_docstr( + torch.istft, + "istft(input, n_fft, hop_length=None, win_length=None, window=None, center=True, " + "normalized=False, onesided=None, length=None, return_complex=False) -> Tensor:\n" + r""" +Inverse short time Fourier Transform. This is expected to be the inverse of :func:`~torch.stft`. + +.. warning:: + From version 2.1, a warning will be provided if a :attr:`window` is + not specified. In a future release, this attribute will be required. + Please provide the same window used in the stft call. 
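+
+A minimal round-trip sketch (illustrative; it assumes the same window is passed
+to both :func:`~torch.stft` and ``istft``, as recommended above):
+
+Example::
+
+    >>> signal = torch.randn(4000)
+    >>> window = torch.hann_window(400)
+    >>> spec = torch.stft(signal, n_fft=400, window=window, return_complex=True)
+    >>> rec = torch.istft(spec, n_fft=400, window=window, length=signal.numel())
+    >>> torch.allclose(rec, signal, atol=1e-4)
+    True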
+ +It has the same parameters (+ additional optional parameter of :attr:`length`) and it should return the +least squares estimation of the original signal. The algorithm will check using the NOLA condition ( +nonzero overlap). + +Important consideration in the parameters :attr:`window` and :attr:`center` so that the envelope +created by the summation of all the windows is never zero at certain point in time. Specifically, +:math:`\sum_{t=-\infty}^{\infty} |w|^2[n-t\times hop\_length] \cancel{=} 0`. + +Since :func:`~torch.stft` discards elements at the end of the signal if they do not fit in a frame, +``istft`` may return a shorter signal than the original signal (can occur if :attr:`center` is False +since the signal isn't padded). If `length` is given in the arguments and is longer than expected, +``istft`` will pad zeros to the end of the returned signal. + +If :attr:`center` is ``True``, then there will be padding e.g. ``'constant'``, ``'reflect'``, etc. +Left padding can be trimmed off exactly because they can be calculated but right padding cannot be +calculated without additional information. + +Example: Suppose the last window is: +``[17, 18, 0, 0, 0]`` vs ``[18, 0, 0, 0, 0]`` + +The :attr:`n_fft`, :attr:`hop_length`, :attr:`win_length` are all the same which prevents the calculation +of right padding. These additional values could be zeros or a reflection of the signal so providing +:attr:`length` could be useful. If :attr:`length` is ``None`` then padding will be aggressively removed +(some loss of signal). + +[1] D. W. Griffin and J. S. Lim, "Signal estimation from modified short-time Fourier transform," +IEEE Trans. ASSP, vol.32, no.2, pp.236-243, Apr. 1984. + +Args: + input (Tensor): The input tensor. Expected to be in the format of :func:`~torch.stft`, + output. That is a complex tensor of shape `(B?, N, T)` where + + - `B?` is an optional batch dimension + - `N` is the number of frequency samples, `(n_fft // 2) + 1` + for onesided input, or otherwise `n_fft`. + - `T` is the number of frames, `1 + length // hop_length` for centered stft, + or `1 + (length - n_fft) // hop_length` otherwise. + + .. versionchanged:: 2.0 + Real datatype inputs are no longer supported. Input must now have a + complex datatype, as returned by ``stft(..., return_complex=True)``. + n_fft (int): Size of Fourier transform + hop_length (Optional[int]): The distance between neighboring sliding window frames. + (Default: ``n_fft // 4``) + win_length (Optional[int]): The size of window frame and STFT filter. (Default: ``n_fft``) + window (Optional[torch.Tensor]): The optional window function. + Shape must be 1d and `<= n_fft` + (Default: ``torch.ones(win_length)``) + center (bool): Whether :attr:`input` was padded on both sides so that the :math:`t`-th frame is + centered at time :math:`t \times \text{hop\_length}`. + (Default: ``True``) + normalized (bool): Whether the STFT was normalized. (Default: ``False``) + onesided (Optional[bool]): Whether the STFT was onesided. + (Default: ``True`` if `n_fft != fft_size` in the input size) + length (Optional[int]): The amount to trim the signal by (i.e. the + original signal length). Defaults to `(T - 1) * hop_length` for + centered stft, or `n_fft + (T - 1) * hop_length` otherwise, where `T` + is the number of input frames. + return_complex (Optional[bool]): + Whether the output should be complex, or if the input should be + assumed to derive from a real signal and window. + Note that this is incompatible with ``onesided=True``. 
+ (Default: ``False``) + +Returns: + Tensor: Least squares estimation of the original signal of shape `(B?, length)` where + `B?` is an optional batch dimension from the input tensor. +""") + + +if TYPE_CHECKING: + # These _impl functions return a variable number of tensors as output with + # __torch_function__; tuple unpacking is done already rather than being + # done by the caller of the _impl function + _unique_impl_out = Any +else: + _unique_impl_out = Tuple[Tensor, Tensor, Tensor] + + +def _unique_impl(input: Tensor, sorted: bool = True, + return_inverse: bool = False, return_counts: bool = False, + dim: Optional[int] = None) -> _unique_impl_out: + r"""unique(input, sorted=True, return_inverse=False, return_counts=False, dim=None) -> Tuple[Tensor, Tensor, Tensor] + + Returns the unique elements of the input tensor. + + .. note:: This function is different from :func:`torch.unique_consecutive` in the sense that + this function also eliminates non-consecutive duplicate values. + + .. note:: Currently in the CUDA implementation and the CPU implementation, + `torch.unique` always sort the tensor at the beginning regardless of the `sort` argument. + Sorting could be slow, so if your input tensor is already sorted, it is recommended to use + :func:`torch.unique_consecutive` which avoids the sorting. + + Args: + input (Tensor): the input tensor + sorted (bool): Whether to sort the unique elements in ascending order + before returning as output. + return_inverse (bool): Whether to also return the indices for where + elements in the original input ended up in the returned unique list. + return_counts (bool): Whether to also return the counts for each unique + element. + dim (int, optional): the dimension to operate upon. If ``None``, the + unique of the flattened input is returned. Otherwise, each of the + tensors indexed by the given dimension is treated as one of the + elements to apply the unique operation upon. See examples for more + details. Default: ``None`` + + Returns: + (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing + + - **output** (*Tensor*): the output list of unique scalar elements. + - **inverse_indices** (*Tensor*): (optional) if + :attr:`return_inverse` is True, there will be an additional + returned tensor (same shape as input) representing the indices + for where elements in the original input map to in the output; + otherwise, this function will only return a single tensor. + - **counts** (*Tensor*): (optional) if + :attr:`return_counts` is True, there will be an additional + returned tensor (same shape as output or output.size(dim), + if dim was specified) representing the number of occurrences + for each unique value or tensor. + + Example:: + + >>> output = torch.unique(torch.tensor([1, 3, 2, 3], dtype=torch.long)) + >>> output + tensor([1, 2, 3]) + + >>> output, inverse_indices = torch.unique( + ... torch.tensor([1, 3, 2, 3], dtype=torch.long), sorted=True, return_inverse=True) + >>> output + tensor([1, 2, 3]) + >>> inverse_indices + tensor([0, 2, 1, 2]) + + >>> output, inverse_indices = torch.unique( + ... torch.tensor([[1, 3], [2, 3]], dtype=torch.long), sorted=True, return_inverse=True) + >>> output + tensor([1, 2, 3]) + >>> inverse_indices + tensor([[0, 2], + [1, 2]]) + + >>> a = torch.tensor([ + ... [ + ... [1, 1, 0, 0], + ... [1, 1, 0, 0], + ... [0, 0, 1, 1], + ... ], + ... [ + ... [0, 0, 1, 1], + ... [0, 0, 1, 1], + ... [1, 1, 1, 1], + ... ], + ... [ + ... [1, 1, 0, 0], + ... [1, 1, 0, 0], + ... [0, 0, 1, 1], + ... 
], + ... ]) + + >>> # If we call `torch.unique(a, dim=0)`, each of the tensors `a[idx, :, :]` + >>> # will be compared. We can see that `a[0, :, :]` and `a[2, :, :]` match + >>> # each other, so one of them will be removed. + >>> (a[0, :, :] == a[2, :, :]).all() + tensor(True) + >>> a_unique_dim0 = torch.unique(a, dim=0) + >>> a_unique_dim0 + tensor([[[0, 0, 1, 1], + [0, 0, 1, 1], + [1, 1, 1, 1]], + [[1, 1, 0, 0], + [1, 1, 0, 0], + [0, 0, 1, 1]]]) + + >>> # Notice which sub-tensors from `a` match with the sub-tensors from + >>> # `a_unique_dim0`: + >>> (a_unique_dim0[0, :, :] == a[1, :, :]).all() + tensor(True) + >>> (a_unique_dim0[1, :, :] == a[0, :, :]).all() + tensor(True) + + >>> # For `torch.unique(a, dim=1)`, each of the tensors `a[:, idx, :]` are + >>> # compared. `a[:, 0, :]` and `a[:, 1, :]` match each other, so one of + >>> # them will be removed. + >>> (a[:, 0, :] == a[:, 1, :]).all() + tensor(True) + >>> torch.unique(a, dim=1) + tensor([[[0, 0, 1, 1], + [1, 1, 0, 0]], + [[1, 1, 1, 1], + [0, 0, 1, 1]], + [[0, 0, 1, 1], + [1, 1, 0, 0]]]) + + >>> # For `torch.unique(a, dim=2)`, the tensors `a[:, :, idx]` are compared. + >>> # `a[:, :, 0]` and `a[:, :, 1]` match each other. Also, `a[:, :, 2]` and + >>> # `a[:, :, 3]` match each other as well. So in this case, two of the + >>> # sub-tensors will be removed. + >>> (a[:, :, 0] == a[:, :, 1]).all() + tensor(True) + >>> (a[:, :, 2] == a[:, :, 3]).all() + tensor(True) + >>> torch.unique(a, dim=2) + tensor([[[0, 1], + [0, 1], + [1, 0]], + [[1, 0], + [1, 0], + [1, 1]], + [[0, 1], + [0, 1], + [1, 0]]]) + """ + if has_torch_function_unary(input): + return handle_torch_function( + unique, (input,), input, sorted=sorted, return_inverse=return_inverse, + return_counts=return_counts, dim=dim) + + if dim is not None: + output, inverse_indices, counts = _VF.unique_dim( + input, + dim, + sorted=sorted, + return_inverse=return_inverse, + return_counts=return_counts, + ) + else: + output, inverse_indices, counts = torch._unique2( + input, + sorted=sorted, + return_inverse=return_inverse, + return_counts=return_counts, + ) + return output, inverse_indices, counts + + +def _unique_consecutive_impl(input: Tensor, return_inverse: bool = False, + return_counts: bool = False, + dim: Optional[int] = None) -> _unique_impl_out: + r"""Eliminates all but the first element from every consecutive group of equivalent elements. + + .. note:: This function is different from :func:`torch.unique` in the sense that this function + only eliminates consecutive duplicate values. This semantics is similar to `std::unique` + in C++. + + Args: + input (Tensor): the input tensor + return_inverse (bool): Whether to also return the indices for where + elements in the original input ended up in the returned unique list. + return_counts (bool): Whether to also return the counts for each unique + element. + dim (int): the dimension to apply unique. If ``None``, the unique of the + flattened input is returned. default: ``None`` + + Returns: + (Tensor, Tensor (optional), Tensor (optional)): A tensor or a tuple of tensors containing + + - **output** (*Tensor*): the output list of unique scalar elements. + - **inverse_indices** (*Tensor*): (optional) if + :attr:`return_inverse` is True, there will be an additional + returned tensor (same shape as input) representing the indices + for where elements in the original input map to in the output; + otherwise, this function will only return a single tensor. 
+ - **counts** (*Tensor*): (optional) if + :attr:`return_counts` is True, there will be an additional + returned tensor (same shape as output or output.size(dim), + if dim was specified) representing the number of occurrences + for each unique value or tensor. + + Example:: + + >>> x = torch.tensor([1, 1, 2, 2, 3, 1, 1, 2]) + >>> output = torch.unique_consecutive(x) + >>> output + tensor([1, 2, 3, 1, 2]) + + >>> output, inverse_indices = torch.unique_consecutive(x, return_inverse=True) + >>> output + tensor([1, 2, 3, 1, 2]) + >>> inverse_indices + tensor([0, 0, 1, 1, 2, 3, 3, 4]) + + >>> output, counts = torch.unique_consecutive(x, return_counts=True) + >>> output + tensor([1, 2, 3, 1, 2]) + >>> counts + tensor([2, 2, 1, 2, 1]) + """ + if has_torch_function_unary(input): + return handle_torch_function( + unique_consecutive, (input,), input, return_inverse=return_inverse, + return_counts=return_counts, dim=dim) + output, inverse_indices, counts = _VF.unique_consecutive( # type: ignore[attr-defined] + input, return_inverse=return_inverse, return_counts=return_counts, dim=dim) + return output, inverse_indices, counts + + +def _return_counts(input, sorted=True, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor] + + if has_torch_function_unary(input): + return _unique_impl(input, sorted, return_inverse, return_counts, dim) + + output, _, counts = _unique_impl(input, sorted, return_inverse, return_counts, dim) + return output, counts + + +def _return_output(input, sorted=True, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, bool, Optional[int]) -> Tensor + + if has_torch_function_unary(input): + return _unique_impl(input, sorted, return_inverse, return_counts, dim) + + output, _, _ = _unique_impl(input, sorted, return_inverse, return_counts, dim) + return output + + +def _return_inverse(input, sorted=True, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor] + + if has_torch_function_unary(input): + return _unique_impl(input, sorted, return_inverse, return_counts, dim) + + output, inverse_indices, _ = _unique_impl(input, sorted, return_inverse, return_counts, dim) + return output, inverse_indices + + +_return_inverse_false = boolean_dispatch( + arg_name='return_counts', + arg_index=3, + default=False, + if_true=_return_counts, + if_false=_return_output, + module_name=__name__, + func_name='unique') + +_return_inverse_true = boolean_dispatch( + arg_name='return_counts', + arg_index=3, + default=False, + if_true=_unique_impl, + if_false=_return_inverse, + module_name=__name__, + func_name='unique') + +# The return type of unique depends on `return_inverse`, and `return_counts` so in order to +# resolve the output type in TorchScript we need to statically know the value of both parameters + +unique = boolean_dispatch( + arg_name='return_inverse', + arg_index=2, + default=False, + if_true=_return_inverse_true, + if_false=_return_inverse_false, + module_name=__name__, + func_name='unique') +unique.__doc__ = _unique_impl.__doc__ + + +def _consecutive_return_counts(input, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor] + + if has_torch_function_unary(input): + return _unique_consecutive_impl(input, return_inverse, return_counts, dim) + + output, _, counts = _unique_consecutive_impl(input, return_inverse, return_counts, dim) + return 
output, counts + + +def _consecutive_return_output(input, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, Optional[int]) -> Tensor + + if has_torch_function_unary(input): + return _unique_consecutive_impl(input, return_inverse, return_counts, dim) + + output, _, _ = _unique_consecutive_impl(input, return_inverse, return_counts, dim) + return output + + +def _consecutive_return_inverse(input, return_inverse=False, return_counts=False, dim=None): + # type: (Tensor, bool, bool, Optional[int]) -> Tuple[Tensor, Tensor] + + if has_torch_function_unary(input): + return _unique_consecutive_impl(input, return_inverse, return_counts, dim) + + output, inverse_indices, _ = _unique_consecutive_impl(input, return_inverse, return_counts, dim) + return output, inverse_indices + + +_consecutive_return_inverse_false = boolean_dispatch( + arg_name='return_counts', + arg_index=1, + default=False, + if_true=_consecutive_return_counts, + if_false=_consecutive_return_output, + module_name=__name__, + func_name='unique_consecutive') + +_consecutive_return_inverse_true = boolean_dispatch( + arg_name='return_counts', + arg_index=1, + default=False, + if_true=_unique_consecutive_impl, + if_false=_consecutive_return_inverse, + module_name=__name__, + func_name='unique_consecutive') + +# The return type of unique depends on `return_inverse`, and `return_counts` so in order to +# resolve the output type in TorchScript we need to statically know the value of both parameters + +unique_consecutive = boolean_dispatch( + arg_name='return_inverse', + arg_index=2, + default=False, + if_true=_consecutive_return_inverse_true, + if_false=_consecutive_return_inverse_false, + module_name=__name__, + func_name='unique_consecutive') +unique_consecutive.__doc__ = _unique_consecutive_impl.__doc__ + +if TYPE_CHECKING: + pass + # There's no good way to use this type annotation without breaking JIT + # overloads. So leave untyped for mypy for now. +else: + @overload + def tensordot(a, b, dims: int = 2, out: Optional[torch.Tensor] = None): + pass + + @overload # noqa: F811 + def tensordot(a, b, dims: Tuple[List[int], List[int]], out: Optional[torch.Tensor] = None): # noqa: F811 + pass + + @overload # noqa: F811 + def tensordot(a, b, dims: List[List[int]], out: Optional[torch.Tensor] = None): # noqa: F811 + pass + + @overload # noqa: F811 + def tensordot(a, b, dims: torch.Tensor, out: Optional[torch.Tensor] = None): # noqa: F811 + pass + + +def tensordot(a, b, dims=2, out: Optional[torch.Tensor] = None): # noqa: F811 + r"""Returns a contraction of a and b over multiple dimensions. + + :attr:`tensordot` implements a generalized matrix product. + + Args: + a (Tensor): Left tensor to contract + b (Tensor): Right tensor to contract + dims (int or Tuple[List[int], List[int]] or List[List[int]] containing two lists or Tensor): number of dimensions to + contract or explicit lists of dimensions for :attr:`a` and + :attr:`b` respectively + + When called with a non-negative integer argument :attr:`dims` = :math:`d`, and + the number of dimensions of :attr:`a` and :attr:`b` is :math:`m` and :math:`n`, + respectively, :func:`~torch.tensordot` computes + + .. math:: + r_{i_0,...,i_{m-d}, i_d,...,i_n} + = \sum_{k_0,...,k_{d-1}} a_{i_0,...,i_{m-d},k_0,...,k_{d-1}} \times b_{k_0,...,k_{d-1}, i_d,...,i_n}. + + When called with :attr:`dims` of the list form, the given dimensions will be contracted + in place of the last :math:`d` of :attr:`a` and the first :math:`d` of :math:`b`. 
The sizes + in these dimensions must match, but :func:`~torch.tensordot` will deal with broadcasted + dimensions. + + Examples:: + + >>> a = torch.arange(60.).reshape(3, 4, 5) + >>> b = torch.arange(24.).reshape(4, 3, 2) + >>> torch.tensordot(a, b, dims=([1, 0], [0, 1])) + tensor([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) + >>> a = torch.randn(3, 4, 5, device='cuda') + >>> b = torch.randn(4, 5, 6, device='cuda') + >>> c = torch.tensordot(a, b, dims=2).cpu() + tensor([[ 8.3504, -2.5436, 6.2922, 2.7556, -1.0732, 3.2741], + [ 3.3161, 0.0704, 5.0187, -0.4079, -4.3126, 4.8744], + [ 0.8223, 3.9445, 3.2168, -0.2400, 3.4117, 1.7780]]) + + >>> a = torch.randn(3, 5, 4, 6) + >>> b = torch.randn(6, 4, 5, 3) + >>> torch.tensordot(a, b, dims=([2, 1, 3], [1, 2, 0])) + tensor([[ 7.7193, -2.4867, -10.3204], + [ 1.5513, -14.4737, -6.5113], + [ -0.2850, 4.2573, -3.5997]]) + """ + if has_torch_function_variadic(a, b): + return handle_torch_function(tensordot, (a, b), a, b, dims=dims, out=out) + + if not isinstance(dims, (tuple, list, torch.Tensor, int, torch.SymInt)): + raise RuntimeError("tensordot expects dims to be int or " + + "Tuple[List[int], List[int]] or " + + "List[List[int]] containing two lists, but got " + + f"dims={dims}") + + dims_a: List[int] = [] + dims_b: List[int] = [] + + if isinstance(dims, (tuple, list)): + dims_a, dims_b = dims + + if isinstance(dims, torch.Tensor): + num_elements = dims.numel() + if num_elements > 1: + assert dims.size()[0] == 2 + dims_a = torch.jit.annotate(List[int], dims[0].tolist()) + dims_b = torch.jit.annotate(List[int], dims[1].tolist()) + else: + dims_val = int(dims.item()) + if dims_val < 0: + raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}") + dims_a = list(range(-dims_val, 0)) + dims_b = list(range(dims_val)) + + if isinstance(dims, (int, torch.SymInt)): + if dims < 0: + raise RuntimeError(f"tensordot expects dims >= 0, but got dims={dims}") + if dims > min(a.dim(), b.dim()): + raise RuntimeError(f"tensordot expects dims < ndim_a or ndim_b, but got dims={dims}") + dims_a = list(range(-dims, 0)) + dims_b = list(range(dims)) + + if out is None: + return _VF.tensordot(a, b, dims_a, dims_b) # type: ignore[attr-defined] + else: + return _VF.tensordot(a, b, dims_a, dims_b, out=out) # type: ignore[attr-defined] + + +def cartesian_prod(*tensors: Tensor) -> Tensor: + """Do cartesian product of the given sequence of tensors. The behavior is similar to + python's `itertools.product`. + + Args: + *tensors: any number of 1 dimensional tensors. + + Returns: + Tensor: A tensor equivalent to converting all the input tensors into lists, + do `itertools.product` on these lists, and finally convert the resulting list + into tensor. + + Example:: + + >>> import itertools + >>> a = [1, 2, 3] + >>> b = [4, 5] + >>> list(itertools.product(a, b)) + [(1, 4), (1, 5), (2, 4), (2, 5), (3, 4), (3, 5)] + >>> tensor_a = torch.tensor(a) + >>> tensor_b = torch.tensor(b) + >>> torch.cartesian_prod(tensor_a, tensor_b) + tensor([[1, 4], + [1, 5], + [2, 4], + [2, 5], + [3, 4], + [3, 5]]) + """ + # This wrapper exists to support variadic args. + if has_torch_function(tensors): + return handle_torch_function(cartesian_prod, tensors, *tensors) + return _VF.cartesian_prod(tensors) # type: ignore[attr-defined] + + +def block_diag(*tensors): + """Create a block diagonal matrix from provided tensors. + + Args: + *tensors: One or more tensors with 0, 1, or 2 dimensions. 
+ + Returns: + Tensor: A 2 dimensional tensor with all the input tensors arranged in + order such that their upper left and lower right corners are + diagonally adjacent. All other elements are set to 0. + + Example:: + + >>> import torch + >>> A = torch.tensor([[0, 1], [1, 0]]) + >>> B = torch.tensor([[3, 4, 5], [6, 7, 8]]) + >>> C = torch.tensor(7) + >>> D = torch.tensor([1, 2, 3]) + >>> E = torch.tensor([[4], [5], [6]]) + >>> torch.block_diag(A, B, C, D, E) + tensor([[0, 1, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 3, 4, 5, 0, 0, 0, 0, 0], + [0, 0, 6, 7, 8, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 7, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 1, 2, 3, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 4], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 5], + [0, 0, 0, 0, 0, 0, 0, 0, 0, 6]]) + """ + # This wrapper exists to support variadic args. + if has_torch_function(tensors): + return handle_torch_function(block_diag, tensors, *tensors) + return torch._C._VariableFunctions.block_diag(tensors) # type: ignore[attr-defined] + + +def cdist(x1, x2, p=2., compute_mode='use_mm_for_euclid_dist_if_necessary'): + # type: (Tensor, Tensor, float, str) -> (Tensor) + r"""Computes batched the p-norm distance between each pair of the two collections of row vectors. + + Args: + x1 (Tensor): input tensor of shape :math:`B \times P \times M`. + x2 (Tensor): input tensor of shape :math:`B \times R \times M`. + p: p value for the p-norm distance to calculate between each vector pair + :math:`\in [0, \infty]`. + compute_mode: + 'use_mm_for_euclid_dist_if_necessary' - will use matrix multiplication approach to calculate + euclidean distance (p = 2) if P > 25 or R > 25 + 'use_mm_for_euclid_dist' - will always use matrix multiplication approach to calculate + euclidean distance (p = 2) + 'donot_use_mm_for_euclid_dist' - will never use matrix multiplication approach to calculate + euclidean distance (p = 2) + Default: use_mm_for_euclid_dist_if_necessary. + + If x1 has shape :math:`B \times P \times M` and x2 has shape :math:`B \times R \times M` then the + output will have shape :math:`B \times P \times R`. + + This function is equivalent to `scipy.spatial.distance.cdist(input,'minkowski', p=p)` + if :math:`p \in (0, \infty)`. When :math:`p = 0` it is equivalent to + `scipy.spatial.distance.cdist(input, 'hamming') * M`. When :math:`p = \infty`, the closest + scipy function is `scipy.spatial.distance.cdist(xn, lambda x, y: np.abs(x - y).max())`. + + Example: + + >>> a = torch.tensor([[0.9041, 0.0196], [-0.3108, -2.4423], [-0.4821, 1.059]]) + >>> a + tensor([[ 0.9041, 0.0196], + [-0.3108, -2.4423], + [-0.4821, 1.0590]]) + >>> b = torch.tensor([[-2.1763, -0.4713], [-0.6986, 1.3702]]) + >>> b + tensor([[-2.1763, -0.4713], + [-0.6986, 1.3702]]) + >>> torch.cdist(a, b, p=2) + tensor([[3.1193, 2.0959], + [2.7138, 3.8322], + [2.2830, 0.3791]]) + """ + if has_torch_function_variadic(x1, x2): + return handle_torch_function( + cdist, (x1, x2), x1, x2, p=p, compute_mode=compute_mode) + if compute_mode == 'use_mm_for_euclid_dist_if_necessary': + return _VF.cdist(x1, x2, p, None) # type: ignore[attr-defined] + elif compute_mode == 'use_mm_for_euclid_dist': + return _VF.cdist(x1, x2, p, 1) # type: ignore[attr-defined] + elif compute_mode == 'donot_use_mm_for_euclid_dist': + return _VF.cdist(x1, x2, p, 2) # type: ignore[attr-defined] + else: + raise ValueError(f"{compute_mode} is not a valid value for compute_mode") + + +def atleast_1d(*tensors): + r""" + Returns a 1-dimensional view of each input tensor with zero dimensions. 
+ Input tensors with one or more dimensions are returned as-is. + + Args: + input (Tensor or list of Tensors) + + Returns: + output (Tensor or tuple of Tensors) + + Example:: + + >>> x = torch.arange(2) + >>> x + tensor([0, 1]) + >>> torch.atleast_1d(x) + tensor([0, 1]) + >>> x = torch.tensor(1.) + >>> x + tensor(1.) + >>> torch.atleast_1d(x) + tensor([1.]) + >>> x = torch.tensor(0.5) + >>> y = torch.tensor(1.) + >>> torch.atleast_1d((x, y)) + (tensor([0.5000]), tensor([1.])) + """ + # This wrapper exists to support variadic args. + if has_torch_function(tensors): + return handle_torch_function(atleast_1d, tensors, *tensors) + if len(tensors) == 1: + tensors = tensors[0] + return _VF.atleast_1d(tensors) # type: ignore[attr-defined] + + +def atleast_2d(*tensors): + r""" + Returns a 2-dimensional view of each input tensor with zero dimensions. + Input tensors with two or more dimensions are returned as-is. + + Args: + input (Tensor or list of Tensors) + + Returns: + output (Tensor or tuple of Tensors) + + Example:: + + >>> x = torch.tensor(1.) + >>> x + tensor(1.) + >>> torch.atleast_2d(x) + tensor([[1.]]) + >>> x = torch.arange(4).view(2, 2) + >>> x + tensor([[0, 1], + [2, 3]]) + >>> torch.atleast_2d(x) + tensor([[0, 1], + [2, 3]]) + >>> x = torch.tensor(0.5) + >>> y = torch.tensor(1.) + >>> torch.atleast_2d((x, y)) + (tensor([[0.5000]]), tensor([[1.]])) + """ + # This wrapper exists to support variadic args. + if has_torch_function(tensors): + return handle_torch_function(atleast_2d, tensors, *tensors) + if len(tensors) == 1: + tensors = tensors[0] + return _VF.atleast_2d(tensors) # type: ignore[attr-defined] + + +def atleast_3d(*tensors): + r""" + Returns a 3-dimensional view of each input tensor with zero dimensions. + Input tensors with three or more dimensions are returned as-is. + + Args: + input (Tensor or list of Tensors) + + Returns: + output (Tensor or tuple of Tensors) + + Example: + + >>> x = torch.tensor(0.5) + >>> x + tensor(0.5000) + >>> torch.atleast_3d(x) + tensor([[[0.5000]]]) + >>> y = torch.arange(4).view(2, 2) + >>> y + tensor([[0, 1], + [2, 3]]) + >>> torch.atleast_3d(y) + tensor([[[0], + [1]], + + [[2], + [3]]]) + >>> x = torch.tensor(1).view(1, 1, 1) + >>> x + tensor([[[1]]]) + >>> torch.atleast_3d(x) + tensor([[[1]]]) + >>> x = torch.tensor(0.5) + >>> y = torch.tensor(1.) + >>> torch.atleast_3d((x, y)) + (tensor([[[0.5000]]]), tensor([[[1.]]])) + """ + # This wrapper exists to support variadic args. + if has_torch_function(tensors): + return handle_torch_function(atleast_3d, tensors, *tensors) + if len(tensors) == 1: + tensors = tensors[0] + return _VF.atleast_3d(tensors) # type: ignore[attr-defined] + + +if TYPE_CHECKING: + pass + # There's no good way to use this type annotation; cannot rename norm() to + # _norm_impl() in a way that doesn't break JIT overloads. So leave untyped + # for mypy for now. 
+ # def norm(input: Tensor, + # p: Optional[Union[str, Number]] = "fro", + # dim: Optional[Union[int, List[int]]] = None, + # keepdim: bool = False, + # out: Optional[Tensor] = None, + # dtype: _dtype = None) -> Tensor: + # return _norm_impl(input, p, dim, keepdim, out, dtype) +else: + # TODO: type dim as BroadcastingList when + # https://github.com/pytorch/pytorch/issues/33782 is fixed + @overload + def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): + # type: (Tensor, str, Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor + pass + + @overload # noqa: F811 + def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811 + # type: (Tensor, Optional[number], Optional[List[int]], bool, Optional[Tensor], Optional[int]) -> Tensor + pass + + @overload # noqa: F811 + def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811 + # type: (Tensor, Optional[number], Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor + pass + + @overload # noqa: F811 + def norm(input, p="fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811 + # type: (Tensor, str, Optional[int], bool, Optional[Tensor], Optional[int]) -> Tensor + pass + + +def norm(input, p: Optional[Union[float, str]] = "fro", dim=None, keepdim=False, out=None, dtype=None): # noqa: F811 + r"""Returns the matrix norm or vector norm of a given tensor. + + .. warning:: + + torch.norm is deprecated and may be removed in a future PyTorch release. + Its documentation and behavior may be incorrect, and it is no longer + actively maintained. + + Use :func:`torch.linalg.vector_norm` when computing vector norms and + :func:`torch.linalg.matrix_norm` when computing matrix norms. + For a function with a similar behavior as this one see :func:`torch.linalg.norm`. + Note, however, the signature for these functions is slightly different than the + signature for ``torch.norm``. + + Args: + input (Tensor): The input tensor. Its data type must be either a floating + point or complex type. For complex inputs, the norm is calculated using the + absolute value of each element. If the input is complex and neither + :attr:`dtype` nor :attr:`out` is specified, the result's data type will + be the corresponding floating point type (e.g. float if :attr:`input` is + complexfloat). + + p (int, float, inf, -inf, 'fro', 'nuc', optional): the order of norm. Default: ``'fro'`` + The following norms can be calculated: + + ====== ============== ========================== + ord matrix norm vector norm + ====== ============== ========================== + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + Number -- sum(abs(x)**ord)**(1./ord) + ====== ============== ========================== + + The vector norm can be calculated across any number of dimensions. + The corresponding dimensions of :attr:`input` are flattened into + one dimension, and the norm is calculated on the flattened + dimension. + + Frobenius norm produces the same result as ``p=2`` in all cases + except when :attr:`dim` is a list of three or more dims, in which + case Frobenius norm throws an error. + + Nuclear norm can only be calculated across exactly two dimensions. + + dim (int, tuple of ints, list of ints, optional): + Specifies which dimension or dimensions of :attr:`input` to + calculate the norm across. If :attr:`dim` is ``None``, the norm will + be calculated across all dimensions of :attr:`input`. 
If the norm + type indicated by :attr:`p` does not support the specified number of + dimensions, an error will occur. + keepdim (bool, optional): whether the output tensors have :attr:`dim` + retained or not. Ignored if :attr:`dim` = ``None`` and + :attr:`out` = ``None``. Default: ``False`` + out (Tensor, optional): the output tensor. Ignored if + :attr:`dim` = ``None`` and :attr:`out` = ``None``. + dtype (:class:`torch.dtype`, optional): the desired data type of + returned tensor. If specified, the input tensor is casted to + :attr:`dtype` while performing the operation. Default: None. + + .. note:: + Even though ``p='fro'`` supports any number of dimensions, the true + mathematical definition of Frobenius norm only applies to tensors with + exactly two dimensions. :func:`torch.linalg.matrix_norm` with ``ord='fro'`` + aligns with the mathematical definition, since it can only be applied across + exactly two dimensions. + + Example:: + + >>> import torch + >>> a = torch.arange(9, dtype= torch.float) - 4 + >>> b = a.reshape((3, 3)) + >>> torch.norm(a) + tensor(7.7460) + >>> torch.norm(b) + tensor(7.7460) + >>> torch.norm(a, float('inf')) + tensor(4.) + >>> torch.norm(b, float('inf')) + tensor(4.) + >>> c = torch.tensor([[ 1, 2, 3], [-1, 1, 4]] , dtype=torch.float) + >>> torch.norm(c, dim=0) + tensor([1.4142, 2.2361, 5.0000]) + >>> torch.norm(c, dim=1) + tensor([3.7417, 4.2426]) + >>> torch.norm(c, p=1, dim=1) + tensor([6., 6.]) + >>> d = torch.arange(8, dtype=torch.float).reshape(2, 2, 2) + >>> torch.norm(d, dim=(1, 2)) + tensor([ 3.7417, 11.2250]) + >>> torch.norm(d[0, :, :]), torch.norm(d[1, :, :]) + (tensor(3.7417), tensor(11.2250)) + """ + + if has_torch_function_unary(input): + return handle_torch_function( + norm, (input,), input, p=p, dim=dim, keepdim=keepdim, out=out, dtype=dtype) + + # NB. All the repeated code and weird python is to please TorchScript. + # For a more compact implementation see the relevant function in `_refs/__init__.py` + + # We don't do this for MPS or sparse tensors + if input.layout == torch.strided and input.device.type in \ + ("cpu", "cuda", "meta", torch.utils.backend_registration._privateuse1_backend_name): + if dim is not None: + if isinstance(dim, (int, torch.SymInt)): + _dim = [dim] + else: + _dim = dim + else: + _dim = None # type: ignore[assignment] + + if isinstance(p, str): + if p == "fro" and (dim is None or isinstance(dim, (int, torch.SymInt)) or len(dim) <= 2): + if out is None: + return torch.linalg.vector_norm(input, 2, _dim, keepdim, dtype=dtype) + else: + return torch.linalg.vector_norm(input, 2, _dim, keepdim, dtype=dtype, out=out) + + # Here we either call the nuclear norm, or we call matrix_norm with some arguments + # that will throw an error + if _dim is None: + _dim = list(range(input.ndim)) + if out is None: + return torch.linalg.matrix_norm(input, p, _dim, keepdim, dtype=dtype) + else: + return torch.linalg.matrix_norm(input, p, _dim, keepdim, dtype=dtype, out=out) + else: + # NB. p should be Union[str, number], not Optional! 
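+            # A plain number for p (e.g. torch.norm(x, p=3)) falls through to
+            # torch.linalg.vector_norm below; p=None is treated as the 2-norm.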
+ _p = 2.0 if p is None else p + if out is None: + return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype) + else: + return torch.linalg.vector_norm(input, _p, _dim, keepdim, dtype=dtype, out=out) + + ndim = input.dim() + + # catch default case + if dim is None and out is None and dtype is None and p is not None: + if isinstance(p, str): + if p == "fro": + return _VF.frobenius_norm(input, dim=(), keepdim=keepdim) + if not isinstance(p, str): + _dim = [i for i in range(ndim)] # noqa: C416 TODO: rewrite as list(range(m)) + return _VF.norm(input, p, dim=_dim, keepdim=keepdim) # type: ignore[attr-defined] + + # TODO: when https://github.com/pytorch/pytorch/issues/33782 is fixed + # remove the overloads where dim is an int and replace with BraodcastingList1 + # and remove next four lines, replace _dim with dim + if dim is not None: + if isinstance(dim, (int, torch.SymInt)): + _dim = [dim] + else: + _dim = dim + else: + _dim = None # type: ignore[assignment] + + if isinstance(p, str): + if p == "fro": + if dtype is not None: + raise ValueError("dtype argument is not supported in frobenius norm") + + if _dim is None: + _dim = list(range(ndim)) + if out is None: + return _VF.frobenius_norm(input, _dim, keepdim=keepdim) # type: ignore[arg-type] + else: + return _VF.frobenius_norm(input, _dim, keepdim=keepdim, out=out) # type: ignore[arg-type] + elif p == "nuc": + if dtype is not None: + raise ValueError("dtype argument is not supported in nuclear norm") + if _dim is None: + if out is None: + return _VF.nuclear_norm(input, keepdim=keepdim) # type: ignore[arg-type] + else: + return _VF.nuclear_norm(input, keepdim=keepdim, out=out) # type: ignore[arg-type] + else: + if out is None: + return _VF.nuclear_norm(input, _dim, keepdim=keepdim) # type: ignore[arg-type] + else: + return _VF.nuclear_norm(input, _dim, keepdim=keepdim, out=out) # type: ignore[arg-type] + raise RuntimeError(f"only valid string values are 'fro' and 'nuc', found {p}") + else: + if _dim is None: + _dim = list(range(ndim)) + + if out is None: + if dtype is None: + return _VF.norm(input, p, _dim, keepdim=keepdim) # type: ignore[attr-defined] + else: + return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype) # type: ignore[attr-defined] + else: + if dtype is None: + return _VF.norm(input, p, _dim, keepdim=keepdim, out=out) # type: ignore[attr-defined] + else: + return _VF.norm(input, p, _dim, keepdim=keepdim, dtype=dtype, out=out) # type: ignore[attr-defined] + +def unravel_index(indices: Tensor, shape: Union[int, Sequence[int], torch.Size]) -> Tuple[Tensor, ...]: + r"""Converts a tensor of flat indices into a tuple of coordinate tensors that + index into an arbitrary tensor of the specified shape. + + Args: + indices (Tensor): An integer tensor containing indices into the + flattened version of an arbitrary tensor of shape :attr:`shape`. + All elements must be in the range ``[0, prod(shape) - 1]``. + + shape (int, sequence of ints, or torch.Size): The shape of the arbitrary + tensor. All elements must be non-negative. + + Returns: + tuple of Tensors: Each ``i``-th tensor in the output corresponds with + dimension ``i`` of :attr:`shape`. Each tensor has the same shape as + ``indices`` and contains one index into dimension ``i`` for each of the + flat indices given by ``indices``. 
+ + Example:: + + >>> import torch + >>> torch.unravel_index(torch.tensor(4), (3, 2)) + (tensor(2), + tensor(0)) + + >>> torch.unravel_index(torch.tensor([4, 1]), (3, 2)) + (tensor([2, 0]), + tensor([0, 1])) + + >>> torch.unravel_index(torch.tensor([0, 1, 2, 3, 4, 5]), (3, 2)) + (tensor([0, 0, 1, 1, 2, 2]), + tensor([0, 1, 0, 1, 0, 1])) + + >>> torch.unravel_index(torch.tensor([1234, 5678]), (10, 10, 10, 10)) + (tensor([1, 5]), + tensor([2, 6]), + tensor([3, 7]), + tensor([4, 8])) + + >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (10, 10, 10, 10)) + (tensor([[1], [5]]), + tensor([[2], [6]]), + tensor([[3], [7]]), + tensor([[4], [8]])) + + >>> torch.unravel_index(torch.tensor([[1234], [5678]]), (100, 100)) + (tensor([[12], [56]]), + tensor([[34], [78]])) + """ + if has_torch_function_unary(indices): + return handle_torch_function( + unravel_index, (indices,), indices, shape=shape) + res_tensor = _unravel_index(indices, shape) + return res_tensor.unbind(-1) + +def _unravel_index(indices: Tensor, shape: Union[int, Sequence[int]]) -> Tensor: + torch._check_type( + not indices.is_complex() and not indices.is_floating_point() and not indices.dtype == torch.bool, + lambda: f"expected 'indices' to be integer dtype, but got {indices.dtype}") + + torch._check_type( + isinstance(shape, (int, torch.SymInt, Sequence)), + lambda: f"expected 'shape' to be int or sequence of ints, but got {type(shape)}") + + if isinstance(shape, (int, torch.SymInt)): + shape = torch.Size([shape]) + else: + for dim in shape: + torch._check_type( + isinstance(dim, (int, torch.SymInt)), + lambda: f"expected 'shape' sequence to only contain ints, but got {type(dim)}") + shape = torch.Size(shape) + + torch._check_value( + all(dim >= 0 for dim in shape), + lambda: f"'shape' cannot have negative values, but got {tuple(shape)}") + + coefs = list(reversed(list(itertools.accumulate(reversed(shape[1:] + torch.Size([1])), func=operator.mul)))) + return indices.unsqueeze(-1).floor_divide( + torch.tensor(coefs, device=indices.device, dtype=torch.int64) + ) % torch.tensor(shape, device=indices.device, dtype=torch.int64) + +def chain_matmul(*matrices, out=None): + r"""Returns the matrix product of the :math:`N` 2-D tensors. This product is efficiently computed + using the matrix chain order algorithm which selects the order in which incurs the lowest cost in terms + of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N` + needs to be greater than or equal to 2; if equal to 2 then a trivial matrix-matrix product is returned. + If :math:`N` is 1, then this is a no-op - the original matrix is returned as is. + + .. warning:: + + :func:`torch.chain_matmul` is deprecated and will be removed in a future PyTorch release. + Use :func:`torch.linalg.multi_dot` instead, which accepts a list of two or more tensors + rather than multiple arguments. + + Args: + matrices (Tensors...): a sequence of 2 or more 2-D tensors whose product is to be determined. + out (Tensor, optional): the output tensor. Ignored if :attr:`out` = ``None``. + + Returns: + Tensor: if the :math:`i^{th}` tensor was of dimensions :math:`p_{i} \times p_{i + 1}`, then the product + would be of dimensions :math:`p_{1} \times p_{N + 1}`. 
+ + Example:: + + >>> # xdoctest: +SKIP + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> a = torch.randn(3, 4) + >>> b = torch.randn(4, 5) + >>> c = torch.randn(5, 6) + >>> d = torch.randn(6, 7) + >>> # will raise a deprecation warning + >>> torch.chain_matmul(a, b, c, d) + tensor([[ -2.3375, -3.9790, -4.1119, -6.6577, 9.5609, -11.5095, -3.2614], + [ 21.4038, 3.3378, -8.4982, -5.2457, -10.2561, -2.4684, 2.7163], + [ -0.9647, -5.8917, -2.3213, -5.2284, 12.8615, -12.2816, -2.5095]]) + + .. _`[CLRS]`: https://mitpress.mit.edu/books/introduction-algorithms-third-edition + """ + # This wrapper exists to support variadic args. + if has_torch_function(matrices): + return handle_torch_function(chain_matmul, matrices, *matrices) + + if out is None: + return _VF.chain_matmul(matrices) # type: ignore[attr-defined] + else: + return _VF.chain_matmul(matrices, out=out) # type: ignore[attr-defined] + + +def _lu_impl(A, pivot=True, get_infos=False, out=None): + # type: (Tensor, bool, bool, Any) -> Tuple[Tensor, Tensor, Tensor] + r"""Computes the LU factorization of a matrix or batches of matrices + :attr:`A`. Returns a tuple containing the LU factorization and + pivots of :attr:`A`. Pivoting is done if :attr:`pivot` is set to + ``True``. + + .. warning:: + + :func:`torch.lu` is deprecated in favor of :func:`torch.linalg.lu_factor` + and :func:`torch.linalg.lu_factor_ex`. :func:`torch.lu` will be removed in a + future PyTorch release. + ``LU, pivots, info = torch.lu(A, compute_pivots)`` should be replaced with + + .. code:: python + + LU, pivots = torch.linalg.lu_factor(A, compute_pivots) + + ``LU, pivots, info = torch.lu(A, compute_pivots, get_infos=True)`` should be replaced with + + .. code:: python + + LU, pivots, info = torch.linalg.lu_factor_ex(A, compute_pivots) + + .. note:: + * The returned permutation matrix for every matrix in the batch is + represented by a 1-indexed vector of size ``min(A.shape[-2], A.shape[-1])``. + ``pivots[i] == j`` represents that in the ``i``-th step of the algorithm, + the ``i``-th row was permuted with the ``j-1``-th row. + * LU factorization with :attr:`pivot` = ``False`` is not available + for CPU, and attempting to do so will throw an error. However, + LU factorization with :attr:`pivot` = ``False`` is available for + CUDA. + * This function does not check if the factorization was successful + or not if :attr:`get_infos` is ``True`` since the status of the + factorization is present in the third element of the return tuple. + * In the case of batches of square matrices with size less or equal + to 32 on a CUDA device, the LU factorization is repeated for + singular matrices due to the bug in the MAGMA library + (see magma issue 13). + * ``L``, ``U``, and ``P`` can be derived using :func:`torch.lu_unpack`. + + .. warning:: + The gradients of this function will only be finite when :attr:`A` is full rank. + This is because the LU decomposition is just differentiable at full rank matrices. + Furthermore, if :attr:`A` is close to not being full rank, + the gradient will be numerically unstable as it depends on the computation of :math:`L^{-1}` and :math:`U^{-1}`. + + Args: + A (Tensor): the tensor to factor of size :math:`(*, m, n)` + pivot (bool, optional): controls whether pivoting is done. Default: ``True`` + get_infos (bool, optional): if set to ``True``, returns an info IntTensor. + Default: ``False`` + out (tuple, optional): optional output tuple. If :attr:`get_infos` is ``True``, + then the elements in the tuple are Tensor, IntTensor, + and IntTensor. 
If :attr:`get_infos` is ``False``, then the + elements in the tuple are Tensor, IntTensor. Default: ``None`` + + Returns: + (Tensor, IntTensor, IntTensor (optional)): A tuple of tensors containing + + - **factorization** (*Tensor*): the factorization of size :math:`(*, m, n)` + + - **pivots** (*IntTensor*): the pivots of size :math:`(*, \text{min}(m, n))`. + ``pivots`` stores all the intermediate transpositions of rows. + The final permutation ``perm`` could be reconstructed by + applying ``swap(perm[i], perm[pivots[i] - 1])`` for ``i = 0, ..., pivots.size(-1) - 1``, + where ``perm`` is initially the identity permutation of :math:`m` elements + (essentially this is what :func:`torch.lu_unpack` is doing). + + - **infos** (*IntTensor*, *optional*): if :attr:`get_infos` is ``True``, this is a tensor of + size :math:`(*)` where non-zero values indicate whether factorization for the matrix or + each minibatch has succeeded or failed + + Example:: + + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK) + >>> # xdoctest: +IGNORE_WANT("non-deterministic") + >>> A = torch.randn(2, 3, 3) + >>> A_LU, pivots = torch.lu(A) + >>> A_LU + tensor([[[ 1.3506, 2.5558, -0.0816], + [ 0.1684, 1.1551, 0.1940], + [ 0.1193, 0.6189, -0.5497]], + + [[ 0.4526, 1.2526, -0.3285], + [-0.7988, 0.7175, -0.9701], + [ 0.2634, -0.9255, -0.3459]]]) + >>> pivots + tensor([[ 3, 3, 3], + [ 3, 3, 3]], dtype=torch.int32) + >>> A_LU, pivots, info = torch.lu(A, get_infos=True) + >>> if info.nonzero().size(0) == 0: + ... print('LU factorization succeeded for all samples!') + LU factorization succeeded for all samples! + """ + # If get_infos is True, then we don't need to check for errors and vice versa + return torch._lu_with_info(A, pivot=pivot, check_errors=(not get_infos)) + +if TYPE_CHECKING: + _ListOrSeq = Sequence[Tensor] +else: + _ListOrSeq = List[Tensor] + + +def _check_list_size(out_len: int, get_infos: bool, out: _ListOrSeq) -> None: + get_infos_int = 1 if get_infos else 0 + if out_len - get_infos_int != 2: + raise TypeError(f"expected tuple of {2 + int(get_infos)} elements but got {out_len}") + if not isinstance(out, (tuple, list)): + raise TypeError(f"argument 'out' must be tuple of Tensors, not {type(out).__name__}") + + +def _lu_with_infos(A, pivot=True, get_infos=False, out=None): + # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor, Tensor]]) -> Tuple[Tensor, Tensor, Tensor] + if has_torch_function_unary(A): + return handle_torch_function( + lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out) + result = _lu_impl(A, pivot, get_infos, out) + if out is not None: + _check_list_size(len(out), get_infos, out) + for i in range(len(out)): + out[i].resize_as_(result[i]).copy_(result[i]) + return out + else: + return result # A_LU, pivots, infos + + +def _lu_no_infos(A, pivot=True, get_infos=False, out=None): + # type: (Tensor, bool, bool, Optional[Tuple[Tensor, Tensor]]) -> Tuple[Tensor, Tensor] + # need to check for torch_function here so that we exit if + if has_torch_function_unary(A): + return handle_torch_function( + lu, (A,), A, pivot=pivot, get_infos=get_infos, out=out) + result = _lu_impl(A, pivot, get_infos, out) + if out is not None: + _check_list_size(len(out), get_infos, out) + for i in range(len(out)): + out[i].resize_as_(result[i]).copy_(result[i]) + return out + else: + return result[0], result[1] # A_LU, pivots + +# The return type of lu depends on `get_infos`, so in order to resolve the output type +# of lu in TorchScript we need to statically know the value of `get_infos` +lu = boolean_dispatch( + 
arg_name='get_infos', + arg_index=2, + default=False, + if_true=_lu_with_infos, + if_false=_lu_no_infos, + module_name=__name__, + func_name='lu') +lu.__doc__ = _lu_impl.__doc__ + + +def align_tensors(*tensors): + raise RuntimeError('`align_tensors` not yet implemented.') diff --git a/llmeval-env/lib/python3.10/site-packages/torch/hub.py b/llmeval-env/lib/python3.10/site-packages/torch/hub.py new file mode 100644 index 0000000000000000000000000000000000000000..5eef08e83d3de919913fd09b66a423ee9312bfcb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/hub.py @@ -0,0 +1,764 @@ +import contextlib +import errno +import hashlib +import json +import os +import re +import shutil +import sys +import tempfile +import torch +import uuid +import warnings +import zipfile +from pathlib import Path +from typing import Dict, Optional, Any +from urllib.error import HTTPError, URLError +from urllib.request import urlopen, Request +from urllib.parse import urlparse # noqa: F401 +from torch.serialization import MAP_LOCATION + +class _Faketqdm: # type: ignore[no-redef] + + def __init__(self, total=None, disable=False, + unit=None, *args, **kwargs): + self.total = total + self.disable = disable + self.n = 0 + # Ignore all extra *args and **kwargs lest you want to reinvent tqdm + + def update(self, n): + if self.disable: + return + + self.n += n + if self.total is None: + sys.stderr.write(f"\r{self.n:.1f} bytes") + else: + sys.stderr.write(f"\r{100 * self.n / float(self.total):.1f}%") + sys.stderr.flush() + + # Don't bother implementing; use real tqdm if you want + def set_description(self, *args, **kwargs): + pass + + def write(self, s): + sys.stderr.write(f"{s}\n") + + def close(self): + self.disable = True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if self.disable: + return + + sys.stderr.write('\n') + +try: + from tqdm import tqdm # If tqdm is installed use it, otherwise use the fake wrapper +except ImportError: + tqdm = _Faketqdm + +__all__ = [ + 'download_url_to_file', + 'get_dir', + 'help', + 'list', + 'load', + 'load_state_dict_from_url', + 'set_dir', +] + +# matches bfd8deac from resnet18-bfd8deac.pth +HASH_REGEX = re.compile(r'-([a-f0-9]*)\.') + +_TRUSTED_REPO_OWNERS = ("facebookresearch", "facebookincubator", "pytorch", "fairinternal") +ENV_GITHUB_TOKEN = 'GITHUB_TOKEN' +ENV_TORCH_HOME = 'TORCH_HOME' +ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME' +DEFAULT_CACHE_DIR = '~/.cache' +VAR_DEPENDENCY = 'dependencies' +MODULE_HUBCONF = 'hubconf.py' +READ_DATA_CHUNK = 128 * 1024 +_hub_dir: Optional[str] = None + + +@contextlib.contextmanager +def _add_to_sys_path(path): + sys.path.insert(0, path) + try: + yield + finally: + sys.path.remove(path) + + +# Copied from tools/shared/module_loader to be included in torch package +def _import_module(name, path): + import importlib.util + from importlib.abc import Loader + spec = importlib.util.spec_from_file_location(name, path) + assert spec is not None + module = importlib.util.module_from_spec(spec) + assert isinstance(spec.loader, Loader) + spec.loader.exec_module(module) + return module + + +def _remove_if_exists(path): + if os.path.exists(path): + if os.path.isfile(path): + os.remove(path) + else: + shutil.rmtree(path) + + +def _git_archive_link(repo_owner, repo_name, ref): + # See https://docs.github.com/en/rest/reference/repos#download-a-repository-archive-zip + return f"https://github.com/{repo_owner}/{repo_name}/zipball/{ref}" + + +def _load_attr_from_module(module, func_name): + # Check if callable 
is defined in the module + if func_name not in dir(module): + return None + return getattr(module, func_name) + + +def _get_torch_home(): + torch_home = os.path.expanduser( + os.getenv(ENV_TORCH_HOME, + os.path.join(os.getenv(ENV_XDG_CACHE_HOME, + DEFAULT_CACHE_DIR), 'torch'))) + return torch_home + + +def _parse_repo_info(github): + if ':' in github: + repo_info, ref = github.split(':') + else: + repo_info, ref = github, None + repo_owner, repo_name = repo_info.split('/') + + if ref is None: + # The ref wasn't specified by the user, so we need to figure out the + # default branch: main or master. Our assumption is that if main exists + # then it's the default branch, otherwise it's master. + try: + with urlopen(f"https://github.com/{repo_owner}/{repo_name}/tree/main/"): + ref = 'main' + except HTTPError as e: + if e.code == 404: + ref = 'master' + else: + raise + except URLError as e: + # No internet connection, need to check for cache as last resort + for possible_ref in ("main", "master"): + if os.path.exists(f"{get_dir()}/{repo_owner}_{repo_name}_{possible_ref}"): + ref = possible_ref + break + if ref is None: + raise RuntimeError( + "It looks like there is no internet connection and the " + f"repo could not be found in the cache ({get_dir()})" + ) from e + return repo_owner, repo_name, ref + + +def _read_url(url): + with urlopen(url) as r: + return r.read().decode(r.headers.get_content_charset('utf-8')) + + +def _validate_not_a_forked_repo(repo_owner, repo_name, ref): + # Use urlopen to avoid depending on local git. + headers = {'Accept': 'application/vnd.github.v3+json'} + token = os.environ.get(ENV_GITHUB_TOKEN) + if token is not None: + headers['Authorization'] = f'token {token}' + for url_prefix in ( + f'https://api.github.com/repos/{repo_owner}/{repo_name}/branches', + f'https://api.github.com/repos/{repo_owner}/{repo_name}/tags'): + page = 0 + while True: + page += 1 + url = f'{url_prefix}?per_page=100&page={page}' + response = json.loads(_read_url(Request(url, headers=headers))) + # Empty response means no more data to process + if not response: + break + for br in response: + if br['name'] == ref or br['commit']['sha'].startswith(ref): + return + + raise ValueError(f'Cannot find {ref} in https://github.com/{repo_owner}/{repo_name}. ' + 'If it\'s a commit from a forked repo, please call hub.load() with forked repo directly.') + + +def _get_cache_or_reload(github, force_reload, trust_repo, calling_fn, verbose=True, skip_validation=False): + # Setup hub_dir to save downloaded files + hub_dir = get_dir() + os.makedirs(hub_dir, exist_ok=True) + # Parse github repo information + repo_owner, repo_name, ref = _parse_repo_info(github) + # Github allows branch name with slash '/', + # this causes confusion with path on both Linux and Windows. + # Backslash is not allowed in Github branch name so no need to + # to worry about it. + normalized_br = ref.replace('/', '_') + # Github renames folder repo-v1.x.x to repo-1.x.x + # We don't know the repo name before downloading the zip file + # and inspect name from it. + # To check if cached repo exists, we need to normalize folder names. 
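+    # e.g. (hypothetical) 'pytorch/vision:releases/0.10' is cached under a
+    # folder named 'pytorch_vision_releases_0.10'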
+ owner_name_branch = '_'.join([repo_owner, repo_name, normalized_br]) + repo_dir = os.path.join(hub_dir, owner_name_branch) + # Check that the repo is in the trusted list + _check_repo_is_trusted(repo_owner, repo_name, owner_name_branch, trust_repo=trust_repo, calling_fn=calling_fn) + + use_cache = (not force_reload) and os.path.exists(repo_dir) + + if use_cache: + if verbose: + sys.stderr.write(f'Using cache found in {repo_dir}\n') + else: + # Validate the tag/branch is from the original repo instead of a forked repo + if not skip_validation: + _validate_not_a_forked_repo(repo_owner, repo_name, ref) + + cached_file = os.path.join(hub_dir, normalized_br + '.zip') + _remove_if_exists(cached_file) + + try: + url = _git_archive_link(repo_owner, repo_name, ref) + sys.stderr.write(f'Downloading: \"{url}\" to {cached_file}\n') + download_url_to_file(url, cached_file, progress=False) + except HTTPError as err: + if err.code == 300: + # Getting a 300 Multiple Choices error likely means that the ref is both a tag and a branch + # in the repo. This can be disambiguated by explicitely using refs/heads/ or refs/tags + # See https://git-scm.com/book/en/v2/Git-Internals-Git-References + # Here, we do the same as git: we throw a warning, and assume the user wanted the branch + warnings.warn( + f"The ref {ref} is ambiguous. Perhaps it is both a tag and a branch in the repo? " + "Torchhub will now assume that it's a branch. " + "You can disambiguate tags and branches by explicitly passing refs/heads/branch_name or " + "refs/tags/tag_name as the ref. That might require using skip_validation=True." + ) + disambiguated_branch_ref = f"refs/heads/{ref}" + url = _git_archive_link(repo_owner, repo_name, ref=disambiguated_branch_ref) + download_url_to_file(url, cached_file, progress=False) + else: + raise + + with zipfile.ZipFile(cached_file) as cached_zipfile: + extraced_repo_name = cached_zipfile.infolist()[0].filename + extracted_repo = os.path.join(hub_dir, extraced_repo_name) + _remove_if_exists(extracted_repo) + # Unzip the code and rename the base folder + cached_zipfile.extractall(hub_dir) + + _remove_if_exists(cached_file) + _remove_if_exists(repo_dir) + shutil.move(extracted_repo, repo_dir) # rename the repo + + return repo_dir + + +def _check_repo_is_trusted(repo_owner, repo_name, owner_name_branch, trust_repo, calling_fn="load"): + hub_dir = get_dir() + filepath = os.path.join(hub_dir, "trusted_list") + + if not os.path.exists(filepath): + Path(filepath).touch() + with open(filepath) as file: + trusted_repos = tuple(line.strip() for line in file) + + # To minimize friction of introducing the new trust_repo mechanism, we consider that + # if a repo was already downloaded by torchhub, then it is already trusted (even if it's not in the allowlist) + trusted_repos_legacy = next(os.walk(hub_dir))[1] + + owner_name = '_'.join([repo_owner, repo_name]) + is_trusted = ( + owner_name in trusted_repos + or owner_name_branch in trusted_repos_legacy + or repo_owner in _TRUSTED_REPO_OWNERS + ) + + # TODO: Remove `None` option in 2.0 and change the default to "check" + if trust_repo is None: + if not is_trusted: + warnings.warn( + "You are about to download and run code from an untrusted repository. In a future release, this won't " + "be allowed. 
To add the repository to your trusted list, change the command to {calling_fn}(..., " + "trust_repo=False) and a command prompt will appear asking for an explicit confirmation of trust, " + f"or {calling_fn}(..., trust_repo=True), which will assume that the prompt is to be answered with " + f"'yes'. You can also use {calling_fn}(..., trust_repo='check') which will only prompt for " + f"confirmation if the repo is not already trusted. This will eventually be the default behaviour") + return + + if (trust_repo is False) or (trust_repo == "check" and not is_trusted): + response = input( + f"The repository {owner_name} does not belong to the list of trusted repositories and as such cannot be downloaded. " + "Do you trust this repository and wish to add it to the trusted list of repositories (y/N)?") + if response.lower() in ("y", "yes"): + if is_trusted: + print("The repository is already trusted.") + elif response.lower() in ("n", "no", ""): + raise Exception("Untrusted repository.") + else: + raise ValueError(f"Unrecognized response {response}.") + + # At this point we're sure that the user trusts the repo (or wants to trust it) + if not is_trusted: + with open(filepath, "a") as file: + file.write(owner_name + "\n") + + +def _check_module_exists(name): + import importlib.util + return importlib.util.find_spec(name) is not None + + +def _check_dependencies(m): + dependencies = _load_attr_from_module(m, VAR_DEPENDENCY) + + if dependencies is not None: + missing_deps = [pkg for pkg in dependencies if not _check_module_exists(pkg)] + if len(missing_deps): + raise RuntimeError(f"Missing dependencies: {', '.join(missing_deps)}") + + +def _load_entry_from_hubconf(m, model): + if not isinstance(model, str): + raise ValueError('Invalid input: model should be a string of function name') + + # Note that if a missing dependency is imported at top level of hubconf, it will + # throw before this function. It's a chicken and egg situation where we have to + # load hubconf to know what're the dependencies, but to import hubconf it requires + # a missing package. This is fine, Python will throw proper error message for users. + _check_dependencies(m) + + func = _load_attr_from_module(m, model) + + if func is None or not callable(func): + raise RuntimeError(f'Cannot find callable {model} in hubconf') + + return func + + +def get_dir(): + r""" + Get the Torch Hub cache directory used for storing downloaded models & weights. + + If :func:`~torch.hub.set_dir` is not called, default path is ``$TORCH_HOME/hub`` where + environment variable ``$TORCH_HOME`` defaults to ``$XDG_CACHE_HOME/torch``. + ``$XDG_CACHE_HOME`` follows the X Design Group specification of the Linux + filesystem layout, with a default value ``~/.cache`` if the environment + variable is not set. + """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_HUB'): + warnings.warn('TORCH_HUB is deprecated, please use env TORCH_HOME instead') + + if _hub_dir is not None: + return _hub_dir + return os.path.join(_get_torch_home(), 'hub') + + +def set_dir(d): + r""" + Optionally set the Torch Hub directory used to save downloaded models & weights. + + Args: + d (str): path to a local folder to save downloaded models & weights. + """ + global _hub_dir + _hub_dir = os.path.expanduser(d) + + +def list(github, force_reload=False, skip_validation=False, trust_repo=None, verbose=True): + r""" + List all callable entrypoints available in the repo specified by ``github``. 
+ + Args: + github (str): a string with format "repo_owner/repo_name[:ref]" with an optional + ref (tag or branch). If ``ref`` is not specified, the default branch is assumed to be ``main`` if + it exists, and otherwise ``master``. + Example: 'pytorch/vision:0.10' + force_reload (bool, optional): whether to discard the existing cache and force a fresh download. + Default is ``False``. + skip_validation (bool, optional): if ``False``, torchhub will check that the branch or commit + specified by the ``github`` argument properly belongs to the repo owner. This will make + requests to the GitHub API; you can specify a non-default GitHub token by setting the + ``GITHUB_TOKEN`` environment variable. Default is ``False``. + trust_repo (bool, str or None): ``"check"``, ``True``, ``False`` or ``None``. + This parameter was introduced in v1.12 and helps ensuring that users + only run code from repos that they trust. + + - If ``False``, a prompt will ask the user whether the repo should + be trusted. + - If ``True``, the repo will be added to the trusted list and loaded + without requiring explicit confirmation. + - If ``"check"``, the repo will be checked against the list of + trusted repos in the cache. If it is not present in that list, the + behaviour will fall back onto the ``trust_repo=False`` option. + - If ``None``: this will raise a warning, inviting the user to set + ``trust_repo`` to either ``False``, ``True`` or ``"check"``. This + is only present for backward compatibility and will be removed in + v2.0. + + Default is ``None`` and will eventually change to ``"check"`` in v2.0. + verbose (bool, optional): If ``False``, mute messages about hitting + local caches. Note that the message about first download cannot be + muted. Default is ``True``. + + Returns: + list: The available callables entrypoint + + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) + >>> entrypoints = torch.hub.list('pytorch/vision', force_reload=True) + """ + repo_dir = _get_cache_or_reload(github, force_reload, trust_repo, "list", verbose=verbose, + skip_validation=skip_validation) + + with _add_to_sys_path(repo_dir): + hubconf_path = os.path.join(repo_dir, MODULE_HUBCONF) + hub_module = _import_module(MODULE_HUBCONF, hubconf_path) + + # We take functions starts with '_' as internal helper functions + entrypoints = [f for f in dir(hub_module) if callable(getattr(hub_module, f)) and not f.startswith('_')] + + return entrypoints + + +def help(github, model, force_reload=False, skip_validation=False, trust_repo=None): + r""" + Show the docstring of entrypoint ``model``. + + Args: + github (str): a string with format with an optional + ref (a tag or a branch). If ``ref`` is not specified, the default branch is assumed + to be ``main`` if it exists, and otherwise ``master``. + Example: 'pytorch/vision:0.10' + model (str): a string of entrypoint name defined in repo's ``hubconf.py`` + force_reload (bool, optional): whether to discard the existing cache and force a fresh download. + Default is ``False``. + skip_validation (bool, optional): if ``False``, torchhub will check that the ref + specified by the ``github`` argument properly belongs to the repo owner. This will make + requests to the GitHub API; you can specify a non-default GitHub token by setting the + ``GITHUB_TOKEN`` environment variable. Default is ``False``. + trust_repo (bool, str or None): ``"check"``, ``True``, ``False`` or ``None``. + This parameter was introduced in v1.12 and helps ensuring that users + only run code from repos that they trust. 
+ + - If ``False``, a prompt will ask the user whether the repo should + be trusted. + - If ``True``, the repo will be added to the trusted list and loaded + without requiring explicit confirmation. + - If ``"check"``, the repo will be checked against the list of + trusted repos in the cache. If it is not present in that list, the + behaviour will fall back onto the ``trust_repo=False`` option. + - If ``None``: this will raise a warning, inviting the user to set + ``trust_repo`` to either ``False``, ``True`` or ``"check"``. This + is only present for backward compatibility and will be removed in + v2.0. + + Default is ``None`` and will eventually change to ``"check"`` in v2.0. + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) + >>> print(torch.hub.help('pytorch/vision', 'resnet18', force_reload=True)) + """ + repo_dir = _get_cache_or_reload(github, force_reload, trust_repo, "help", verbose=True, + skip_validation=skip_validation) + + with _add_to_sys_path(repo_dir): + hubconf_path = os.path.join(repo_dir, MODULE_HUBCONF) + hub_module = _import_module(MODULE_HUBCONF, hubconf_path) + + entry = _load_entry_from_hubconf(hub_module, model) + + return entry.__doc__ + + +def load(repo_or_dir, model, *args, source='github', trust_repo=None, force_reload=False, verbose=True, + skip_validation=False, + **kwargs): + r""" + Load a model from a github repo or a local directory. + + Note: Loading a model is the typical use case, but this can also be used to + for loading other objects such as tokenizers, loss functions, etc. + + If ``source`` is 'github', ``repo_or_dir`` is expected to be + of the form ``repo_owner/repo_name[:ref]`` with an optional + ref (a tag or a branch). + + If ``source`` is 'local', ``repo_or_dir`` is expected to be a + path to a local directory. + + Args: + repo_or_dir (str): If ``source`` is 'github', + this should correspond to a github repo with format ``repo_owner/repo_name[:ref]`` with + an optional ref (tag or branch), for example 'pytorch/vision:0.10'. If ``ref`` is not specified, + the default branch is assumed to be ``main`` if it exists, and otherwise ``master``. + If ``source`` is 'local' then it should be a path to a local directory. + model (str): the name of a callable (entrypoint) defined in the + repo/dir's ``hubconf.py``. + *args (optional): the corresponding args for callable ``model``. + source (str, optional): 'github' or 'local'. Specifies how + ``repo_or_dir`` is to be interpreted. Default is 'github'. + trust_repo (bool, str or None): ``"check"``, ``True``, ``False`` or ``None``. + This parameter was introduced in v1.12 and helps ensuring that users + only run code from repos that they trust. + + - If ``False``, a prompt will ask the user whether the repo should + be trusted. + - If ``True``, the repo will be added to the trusted list and loaded + without requiring explicit confirmation. + - If ``"check"``, the repo will be checked against the list of + trusted repos in the cache. If it is not present in that list, the + behaviour will fall back onto the ``trust_repo=False`` option. + - If ``None``: this will raise a warning, inviting the user to set + ``trust_repo`` to either ``False``, ``True`` or ``"check"``. This + is only present for backward compatibility and will be removed in + v2.0. + + Default is ``None`` and will eventually change to ``"check"`` in v2.0. + force_reload (bool, optional): whether to force a fresh download of + the github repo unconditionally. Does not have any effect if + ``source = 'local'``. Default is ``False``. 
+ verbose (bool, optional): If ``False``, mute messages about hitting + local caches. Note that the message about first download cannot be + muted. Does not have any effect if ``source = 'local'``. + Default is ``True``. + skip_validation (bool, optional): if ``False``, torchhub will check that the branch or commit + specified by the ``github`` argument properly belongs to the repo owner. This will make + requests to the GitHub API; you can specify a non-default GitHub token by setting the + ``GITHUB_TOKEN`` environment variable. Default is ``False``. + **kwargs (optional): the corresponding kwargs for callable ``model``. + + Returns: + The output of the ``model`` callable when called with the given + ``*args`` and ``**kwargs``. + + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) + >>> # from a github repo + >>> repo = 'pytorch/vision' + >>> model = torch.hub.load(repo, 'resnet50', weights='ResNet50_Weights.IMAGENET1K_V1') + >>> # from a local directory + >>> path = '/some/local/path/pytorch/vision' + >>> # xdoctest: +SKIP + >>> model = torch.hub.load(path, 'resnet50', weights='ResNet50_Weights.DEFAULT') + """ + source = source.lower() + + if source not in ('github', 'local'): + raise ValueError( + f'Unknown source: "{source}". Allowed values: "github" | "local".') + + if source == 'github': + repo_or_dir = _get_cache_or_reload(repo_or_dir, force_reload, trust_repo, "load", + verbose=verbose, skip_validation=skip_validation) + + model = _load_local(repo_or_dir, model, *args, **kwargs) + return model + + +def _load_local(hubconf_dir, model, *args, **kwargs): + r""" + Load a model from a local directory with a ``hubconf.py``. + + Args: + hubconf_dir (str): path to a local directory that contains a + ``hubconf.py``. + model (str): name of an entrypoint defined in the directory's + ``hubconf.py``. + *args (optional): the corresponding args for callable ``model``. + **kwargs (optional): the corresponding kwargs for callable ``model``. + + Returns: + a single model with corresponding pretrained weights. + + Example: + >>> # xdoctest: +SKIP("stub local path") + >>> path = '/some/local/path/pytorch/vision' + >>> model = _load_local(path, 'resnet50', weights='ResNet50_Weights.IMAGENET1K_V1') + """ + with _add_to_sys_path(hubconf_dir): + hubconf_path = os.path.join(hubconf_dir, MODULE_HUBCONF) + hub_module = _import_module(MODULE_HUBCONF, hubconf_path) + + entry = _load_entry_from_hubconf(hub_module, model) + model = entry(*args, **kwargs) + + return model + + +def download_url_to_file(url: str, dst: str, hash_prefix: Optional[str] = None, + progress: bool = True) -> None: + r"""Download object at the given URL to a local path. + + Args: + url (str): URL of the object to download + dst (str): Full path where object will be saved, e.g. ``/tmp/temporary_file`` + hash_prefix (str, optional): If not None, the SHA256 downloaded file should start with ``hash_prefix``. 
+ Default: None + progress (bool, optional): whether or not to display a progress bar to stderr + Default: True + + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) + >>> # xdoctest: +REQUIRES(POSIX) + >>> torch.hub.download_url_to_file('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth', '/tmp/temporary_file') + + """ + file_size = None + req = Request(url, headers={"User-Agent": "torch.hub"}) + u = urlopen(req) + meta = u.info() + if hasattr(meta, 'getheaders'): + content_length = meta.getheaders("Content-Length") + else: + content_length = meta.get_all("Content-Length") + if content_length is not None and len(content_length) > 0: + file_size = int(content_length[0]) + + # We deliberately save it in a temp file and move it after + # download is complete. This prevents a local working checkpoint + # being overridden by a broken download. + # We deliberately do not use NamedTemporaryFile to avoid restrictive + # file permissions being applied to the downloaded file. + dst = os.path.expanduser(dst) + for seq in range(tempfile.TMP_MAX): + tmp_dst = dst + '.' + uuid.uuid4().hex + '.partial' + try: + f = open(tmp_dst, 'w+b') + except FileExistsError: + continue + break + else: + raise FileExistsError(errno.EEXIST, 'No usable temporary file name found') + + try: + if hash_prefix is not None: + sha256 = hashlib.sha256() + with tqdm(total=file_size, disable=not progress, + unit='B', unit_scale=True, unit_divisor=1024) as pbar: + while True: + buffer = u.read(READ_DATA_CHUNK) + if len(buffer) == 0: + break + f.write(buffer) # type: ignore[possibly-undefined] + if hash_prefix is not None: + sha256.update(buffer) # type: ignore[possibly-undefined] + pbar.update(len(buffer)) + + f.close() + if hash_prefix is not None: + digest = sha256.hexdigest() # type: ignore[possibly-undefined] + if digest[:len(hash_prefix)] != hash_prefix: + raise RuntimeError(f'invalid hash value (expected "{hash_prefix}", got "{digest}")') + shutil.move(f.name, dst) + finally: + f.close() + if os.path.exists(f.name): + os.remove(f.name) + + +# Hub used to support automatically extracts from zipfile manually compressed by users. +# The legacy zip format expects only one file from torch.save() < 1.6 in the zip. +# We should remove this support since zipfile is now default zipfile format for torch.save(). +def _is_legacy_zip_format(filename: str) -> bool: + if zipfile.is_zipfile(filename): + infolist = zipfile.ZipFile(filename).infolist() + return len(infolist) == 1 and not infolist[0].is_dir() + return False + + +def _legacy_zip_load(filename: str, model_dir: str, map_location: MAP_LOCATION, weights_only: bool) -> Dict[str, Any]: + warnings.warn('Falling back to the old format < 1.6. This support will be ' + 'deprecated in favor of default zipfile format introduced in 1.6. ' + 'Please redo torch.save() to save it in the new zipfile format.') + # Note: extractall() defaults to overwrite file if exists. No need to clean up beforehand. + # We deliberately don't handle tarfile here since our legacy serialization format was in tar. + # E.g. resnet18-5c106cde.pth which is widely used. 
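+ # Sketch of the flow below: the archive is expected to hold a single file (as checked by
+ # _is_legacy_zip_format above); it is extracted into model_dir and the extracted file is then
+ # deserialized with torch.load() using the caller's map_location / weights_only settings.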
+ with zipfile.ZipFile(filename) as f:
+ members = f.infolist()
+ if len(members) != 1:
+ raise RuntimeError('Only one file(not dir) is allowed in the zipfile')
+ f.extractall(model_dir)
+ extraced_name = members[0].filename
+ extracted_file = os.path.join(model_dir, extraced_name)
+ return torch.load(extracted_file, map_location=map_location, weights_only=weights_only)
+
+
+def load_state_dict_from_url(
+ url: str,
+ model_dir: Optional[str] = None,
+ map_location: MAP_LOCATION = None,
+ progress: bool = True,
+ check_hash: bool = False,
+ file_name: Optional[str] = None,
+ weights_only: bool = False,
+) -> Dict[str, Any]:
+ r"""Loads the Torch serialized object at the given URL.
+
+ If the downloaded file is a zip file, it will be automatically
+ decompressed.
+
+ If the object is already present in `model_dir`, it's deserialized and
+ returned.
+ The default value of ``model_dir`` is ``<hub_dir>/checkpoints`` where
+ ``hub_dir`` is the directory returned by :func:`~torch.hub.get_dir`.
+
+ Args:
+ url (str): URL of the object to download
+ model_dir (str, optional): directory in which to save the object
+ map_location (optional): a function or a dict specifying how to remap storage locations (see torch.load)
+ progress (bool, optional): whether or not to display a progress bar to stderr.
+ Default: True
+ check_hash(bool, optional): If True, the filename part of the URL should follow the naming convention
+ ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
+ digits of the SHA256 hash of the contents of the file. The hash is used to
+ ensure unique names and to verify the contents of the file.
+ Default: False
+ file_name (str, optional): name for the downloaded file. Filename from ``url`` will be used if not set.
+ weights_only(bool, optional): If True, only weights will be loaded and no complex pickled objects.
+ Recommended for untrusted sources. See :func:`~torch.load` for more details. 
+ + Example: + >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_HUB) + >>> state_dict = torch.hub.load_state_dict_from_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth') + + """ + # Issue warning to move data if old env is set + if os.getenv('TORCH_MODEL_ZOO'): + warnings.warn('TORCH_MODEL_ZOO is deprecated, please use env TORCH_HOME instead') + + if model_dir is None: + hub_dir = get_dir() + model_dir = os.path.join(hub_dir, 'checkpoints') + + os.makedirs(model_dir, exist_ok=True) + + parts = urlparse(url) + filename = os.path.basename(parts.path) + if file_name is not None: + filename = file_name + cached_file = os.path.join(model_dir, filename) + if not os.path.exists(cached_file): + sys.stderr.write(f'Downloading: "{url}" to {cached_file}\n') + hash_prefix = None + if check_hash: + r = HASH_REGEX.search(filename) # r is Optional[Match[str]] + hash_prefix = r.group(1) if r else None + download_url_to_file(url, cached_file, hash_prefix, progress=progress) + + if _is_legacy_zip_format(cached_file): + return _legacy_zip_load(cached_file, model_dir, map_location, weights_only) + return torch.load(cached_file, map_location=map_location, weights_only=weights_only) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/py.typed b/llmeval-env/lib/python3.10/site-packages/torch/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/quasirandom.py b/llmeval-env/lib/python3.10/site-packages/torch/quasirandom.py new file mode 100644 index 0000000000000000000000000000000000000000..1c9b949c55651c42895c1a1afb6d9050d41aca2f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/quasirandom.py @@ -0,0 +1,180 @@ +import torch +from typing import Optional + + +class SobolEngine: + r""" + The :class:`torch.quasirandom.SobolEngine` is an engine for generating + (scrambled) Sobol sequences. Sobol sequences are an example of low + discrepancy quasi-random sequences. + + This implementation of an engine for Sobol sequences is capable of + sampling sequences up to a maximum dimension of 21201. It uses direction + numbers from https://web.maths.unsw.edu.au/~fkuo/sobol/ obtained using the + search criterion D(6) up to the dimension 21201. This is the recommended + choice by the authors. + + References: + - Art B. Owen. Scrambling Sobol and Niederreiter-Xing points. + Journal of Complexity, 14(4):466-489, December 1998. + + - I. M. Sobol. The distribution of points in a cube and the accurate + evaluation of integrals. + Zh. Vychisl. Mat. i Mat. Phys., 7:784-802, 1967. + + Args: + dimension (Int): The dimensionality of the sequence to be drawn + scramble (bool, optional): Setting this to ``True`` will produce + scrambled Sobol sequences. Scrambling is + capable of producing better Sobol + sequences. Default: ``False``. + seed (Int, optional): This is the seed for the scrambling. The seed + of the random number generator is set to this, + if specified. Otherwise, it uses a random seed. 
+ Default: ``None`` + + Examples:: + + >>> # xdoctest: +SKIP("unseeded random state") + >>> soboleng = torch.quasirandom.SobolEngine(dimension=5) + >>> soboleng.draw(3) + tensor([[0.0000, 0.0000, 0.0000, 0.0000, 0.0000], + [0.5000, 0.5000, 0.5000, 0.5000, 0.5000], + [0.7500, 0.2500, 0.2500, 0.2500, 0.7500]]) + """ + MAXBIT = 30 + MAXDIM = 21201 + + def __init__(self, dimension, scramble=False, seed=None): + if dimension > self.MAXDIM or dimension < 1: + raise ValueError("Supported range of dimensionality " + f"for SobolEngine is [1, {self.MAXDIM}]") + + self.seed = seed + self.scramble = scramble + self.dimension = dimension + + cpu = torch.device("cpu") + + self.sobolstate = torch.zeros(dimension, self.MAXBIT, device=cpu, dtype=torch.long) + torch._sobol_engine_initialize_state_(self.sobolstate, self.dimension) + + if not self.scramble: + self.shift = torch.zeros(self.dimension, device=cpu, dtype=torch.long) + else: + self._scramble() + + self.quasi = self.shift.clone(memory_format=torch.contiguous_format) + self._first_point = (self.quasi / 2 ** self.MAXBIT).reshape(1, -1) + self.num_generated = 0 + + def draw(self, n: int = 1, out: Optional[torch.Tensor] = None, + dtype: torch.dtype = torch.float32) -> torch.Tensor: + r""" + Function to draw a sequence of :attr:`n` points from a Sobol sequence. + Note that the samples are dependent on the previous samples. The size + of the result is :math:`(n, dimension)`. + + Args: + n (Int, optional): The length of sequence of points to draw. + Default: 1 + out (Tensor, optional): The output tensor + dtype (:class:`torch.dtype`, optional): the desired data type of the + returned tensor. + Default: ``torch.float32`` + """ + if self.num_generated == 0: + if n == 1: + result = self._first_point.to(dtype) + else: + result, self.quasi = torch._sobol_engine_draw( + self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated, dtype=dtype, + ) + result = torch.cat((self._first_point, result), dim=-2) + else: + result, self.quasi = torch._sobol_engine_draw( + self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1, dtype=dtype, + ) + + self.num_generated += n + + if out is not None: + out.resize_as_(result).copy_(result) + return out + + return result + + def draw_base2(self, m: int, out: Optional[torch.Tensor] = None, + dtype: torch.dtype = torch.float32) -> torch.Tensor: + r""" + Function to draw a sequence of :attr:`2**m` points from a Sobol sequence. + Note that the samples are dependent on the previous samples. The size + of the result is :math:`(2**m, dimension)`. + + Args: + m (Int): The (base2) exponent of the number of points to draw. + out (Tensor, optional): The output tensor + dtype (:class:`torch.dtype`, optional): the desired data type of the + returned tensor. + Default: ``torch.float32`` + """ + n = 2 ** m + total_n = self.num_generated + n + if not (total_n & (total_n - 1) == 0): + raise ValueError("The balance properties of Sobol' points require " + f"n to be a power of 2. {self.num_generated} points have been " + f"previously generated, then: n={self.num_generated}+2**{m}={total_n}. " + "If you still want to do this, please use " + "'SobolEngine.draw()' instead." + ) + return self.draw(n=n, out=out, dtype=dtype) + + def reset(self): + r""" + Function to reset the ``SobolEngine`` to base state. + """ + self.quasi.copy_(self.shift) + self.num_generated = 0 + return self + + def fast_forward(self, n): + r""" + Function to fast-forward the state of the ``SobolEngine`` by + :attr:`n` steps. 
This is equivalent to drawing :attr:`n` samples + without using the samples. + + Args: + n (Int): The number of steps to fast-forward by. + """ + if self.num_generated == 0: + torch._sobol_engine_ff_(self.quasi, n - 1, self.sobolstate, self.dimension, self.num_generated) + else: + torch._sobol_engine_ff_(self.quasi, n, self.sobolstate, self.dimension, self.num_generated - 1) + self.num_generated += n + return self + + def _scramble(self): + g: Optional[torch.Generator] = None + if self.seed is not None: + g = torch.Generator() + g.manual_seed(self.seed) + + cpu = torch.device("cpu") + + # Generate shift vector + shift_ints = torch.randint(2, (self.dimension, self.MAXBIT), device=cpu, generator=g) + self.shift = torch.mv(shift_ints, torch.pow(2, torch.arange(0, self.MAXBIT, device=cpu))) + + # Generate lower triangular matrices (stacked across dimensions) + ltm_dims = (self.dimension, self.MAXBIT, self.MAXBIT) + ltm = torch.randint(2, ltm_dims, device=cpu, generator=g).tril() + + torch._sobol_engine_scramble_(self.sobolstate, ltm, self.dimension) + + def __repr__(self): + fmt_string = [f'dimension={self.dimension}'] + if self.scramble: + fmt_string += ['scramble=True'] + if self.seed is not None: + fmt_string += [f'seed={self.seed}'] + return self.__class__.__name__ + '(' + ', '.join(fmt_string) + ')' diff --git a/llmeval-env/lib/python3.10/site-packages/torch/return_types.pyi b/llmeval-env/lib/python3.10/site-packages/torch/return_types.pyi new file mode 100644 index 0000000000000000000000000000000000000000..b6fbf20793518de3e942211b9bca2935f25f3711 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/return_types.pyi @@ -0,0 +1,437 @@ +# @generated from torch/_C/return_types.pyi + +from typing import ( + Any, + Callable, + ContextManager, + Iterator, + List, + Literal, + NamedTuple, + NoReturn, + Optional, + overload, + Sequence, + Tuple, + Type, + TypeVar, + Union, +) + +from torch import contiguous_format, Generator, inf, memory_format, strided, Tensor, SymInt +from torch.types import ( + _bool, + _device, + _dtype, + _float, + _int, + _layout, + _qscheme, + _size, + Number, +) + +class _fake_quantize_per_tensor_affine_cachemask_tensor_qparams(Tuple[Tensor, Tensor]): + @property + def output(self) -> Tensor: ... + @property + def mask(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _fused_moving_avg_obs_fq_helper(Tuple[Tensor, Tensor]): + @property + def output(self) -> Tensor: ... + @property + def mask(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _linalg_det(Tuple[Tensor, Tensor, Tensor]): + @property + def result(self) -> Tensor: ... + @property + def LU(self) -> Tensor: ... + @property + def pivots(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor]): ... + n_fields: _int = 3 + n_sequeunce_fields: _int = 3 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _linalg_eigh(Tuple[Tensor, Tensor]): + @property + def eigenvalues(self) -> Tensor: ... + @property + def eigenvectors(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... 
+ n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _linalg_slogdet(Tuple[Tensor, Tensor, Tensor, Tensor]): + @property + def sign(self) -> Tensor: ... + @property + def logabsdet(self) -> Tensor: ... + @property + def LU(self) -> Tensor: ... + @property + def pivots(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor, Tensor]): ... + n_fields: _int = 4 + n_sequeunce_fields: _int = 4 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _linalg_solve_ex(Tuple[Tensor, Tensor, Tensor, Tensor]): + @property + def result(self) -> Tensor: ... + @property + def LU(self) -> Tensor: ... + @property + def pivots(self) -> Tensor: ... + @property + def info(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor, Tensor]): ... + n_fields: _int = 4 + n_sequeunce_fields: _int = 4 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _linalg_svd(Tuple[Tensor, Tensor, Tensor]): + @property + def U(self) -> Tensor: ... + @property + def S(self) -> Tensor: ... + @property + def Vh(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor]): ... + n_fields: _int = 3 + n_sequeunce_fields: _int = 3 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _lu_with_info(Tuple[Tensor, Tensor, Tensor]): + @property + def LU(self) -> Tensor: ... + @property + def pivots(self) -> Tensor: ... + @property + def info(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor]): ... + n_fields: _int = 3 + n_sequeunce_fields: _int = 3 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _scaled_dot_product_cudnn_attention(Tuple[Tensor, Tensor, Tensor, Tensor]): + @property + def output(self) -> Tensor: ... + @property + def logsumexp(self) -> Tensor: ... + @property + def philox_seed(self) -> Tensor: ... + @property + def philox_offset(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor, Tensor]): ... + n_fields: _int = 4 + n_sequeunce_fields: _int = 4 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _scaled_dot_product_efficient_attention(Tuple[Tensor, Tensor, Tensor, Tensor]): + @property + def output(self) -> Tensor: ... + @property + def log_sumexp(self) -> Tensor: ... + @property + def philox_seed(self) -> Tensor: ... + @property + def philox_offset(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor, Tensor]): ... + n_fields: _int = 4 + n_sequeunce_fields: _int = 4 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _scaled_dot_product_flash_attention(Tuple[Tensor, Tensor, Tensor, Tensor, Union[_int, SymInt], Union[_int, SymInt], Tensor, Tensor, Tensor]): + @property + def output(self) -> Tensor: ... + @property + def logsumexp(self) -> Tensor: ... + @property + def cum_seq_q(self) -> Tensor: ... + @property + def cum_seq_k(self) -> Tensor: ... + @property + def max_q(self) -> Union[_int, SymInt]: ... + @property + def max_k(self) -> Union[_int, SymInt]: ... + @property + def philox_seed(self) -> Tensor: ... + @property + def philox_offset(self) -> Tensor: ... + @property + def debug_attn_mask(self) -> Tensor: ... 
+ def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor, Tensor, Union[_int, SymInt], Union[_int, SymInt], Tensor, Tensor, Tensor]): ... + n_fields: _int = 9 + n_sequeunce_fields: _int = 9 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _scaled_dot_product_flash_attention_for_cpu(Tuple[Tensor, Tensor]): + @property + def output(self) -> Tensor: ... + @property + def logsumexp(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class _unpack_dual(Tuple[Tensor, Tensor]): + @property + def primal(self) -> Tensor: ... + @property + def tangent(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class aminmax(Tuple[Tensor, Tensor]): + @property + def min(self) -> Tensor: ... + @property + def max(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class cummax(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class cummin(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class frexp(Tuple[Tensor, Tensor]): + @property + def mantissa(self) -> Tensor: ... + @property + def exponent(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class geqrf(Tuple[Tensor, Tensor]): + @property + def a(self) -> Tensor: ... + @property + def tau(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class histogram(Tuple[Tensor, Tensor]): + @property + def hist(self) -> Tensor: ... + @property + def bin_edges(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class histogramdd(Tuple[Tensor, Tuple[Tensor, ...]]): + @property + def hist(self) -> Tensor: ... + @property + def bin_edges(self) -> Tuple[Tensor, ...]: ... + def __new__(cls, sequence: Tuple[Tensor, Tuple[Tensor, ...]]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class kthvalue(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... 
+ @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class lu_unpack(Tuple[Tensor, Tensor, Tensor]): + @property + def P(self) -> Tensor: ... + @property + def L(self) -> Tensor: ... + @property + def U(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor]): ... + n_fields: _int = 3 + n_sequeunce_fields: _int = 3 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class max(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class median(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class min(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class mode(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class nanmedian(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class qr(Tuple[Tensor, Tensor]): + @property + def Q(self) -> Tensor: ... + @property + def R(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class slogdet(Tuple[Tensor, Tensor]): + @property + def sign(self) -> Tensor: ... + @property + def logabsdet(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class sort(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class svd(Tuple[Tensor, Tensor, Tensor]): + @property + def U(self) -> Tensor: ... + @property + def S(self) -> Tensor: ... + @property + def V(self) -> Tensor: ... 
+ def __new__(cls, sequence: Tuple[Tensor, Tensor, Tensor]): ... + n_fields: _int = 3 + n_sequeunce_fields: _int = 3 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class topk(Tuple[Tensor, Tensor]): + @property + def values(self) -> Tensor: ... + @property + def indices(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +class triangular_solve(Tuple[Tensor, Tensor]): + @property + def solution(self) -> Tensor: ... + @property + def cloned_coefficient(self) -> Tensor: ... + def __new__(cls, sequence: Tuple[Tensor, Tensor]): ... + n_fields: _int = 2 + n_sequeunce_fields: _int = 2 + n_unnamed_fields: _int = 0 + def __init_subclass__(cls) -> NoReturn: ... # prohibit subclassing + +all_return_types: List[Type] = [] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/torch_version.py b/llmeval-env/lib/python3.10/site-packages/torch/torch_version.py new file mode 100644 index 0000000000000000000000000000000000000000..f73a0b71c1a815be6b15d1972fed8350004d6721 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/torch_version.py @@ -0,0 +1,58 @@ +# mypy: ignore-errors + +from typing import Any, Iterable +from .version import __version__ as internal_version +from ._vendor.packaging.version import Version, InvalidVersion + +__all__ = ['TorchVersion'] + + +class TorchVersion(str): + """A string with magic powers to compare to both Version and iterables! + Prior to 1.10.0 torch.__version__ was stored as a str and so many did + comparisons against torch.__version__ as if it were a str. In order to not + break them we have TorchVersion which masquerades as a str while also + having the ability to compare against both packaging.version.Version as + well as tuples of values, eg. 
(1, 2, 1) + Examples: + Comparing a TorchVersion object to a Version object + TorchVersion('1.10.0a') > Version('1.10.0a') + Comparing a TorchVersion object to a Tuple object + TorchVersion('1.10.0a') > (1, 2) # 1.2 + TorchVersion('1.10.0a') > (1, 2, 1) # 1.2.1 + Comparing a TorchVersion object against a string + TorchVersion('1.10.0a') > '1.2' + TorchVersion('1.10.0a') > '1.2.1' + """ + # fully qualified type names here to appease mypy + def _convert_to_version(self, inp: Any) -> Any: + if isinstance(inp, Version): + return inp + elif isinstance(inp, str): + return Version(inp) + elif isinstance(inp, Iterable): + # Ideally this should work for most cases by attempting to group + # the version tuple, assuming the tuple looks (MAJOR, MINOR, ?PATCH) + # Examples: + # * (1) -> Version("1") + # * (1, 20) -> Version("1.20") + # * (1, 20, 1) -> Version("1.20.1") + return Version('.'.join(str(item) for item in inp)) + else: + raise InvalidVersion(inp) + + def _cmp_wrapper(self, cmp: Any, method: str) -> bool: + try: + return getattr(Version(self), method)(self._convert_to_version(cmp)) + except BaseException as e: + if not isinstance(e, InvalidVersion): + raise + # Fall back to regular string comparison if dealing with an invalid + # version like 'parrot' + return getattr(super(), method)(cmp) + + +for cmp_method in ["__gt__", "__lt__", "__eq__", "__ge__", "__le__"]: + setattr(TorchVersion, cmp_method, lambda x, y, method=cmp_method: x._cmp_wrapper(y, method)) + +__version__ = TorchVersion(internal_version) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/types.py b/llmeval-env/lib/python3.10/site-packages/torch/types.py new file mode 100644 index 0000000000000000000000000000000000000000..22c01e3bb9795ec2ca23d6149ebbbfc0ab19bb7e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/types.py @@ -0,0 +1,79 @@ +import torch +from typing import Any, List, Optional, Sequence, Tuple, Union + +import builtins + +# Convenience aliases for common composite types that we need +# to talk about in PyTorch + +_TensorOrTensors = Union[torch.Tensor, Sequence[torch.Tensor]] +_TensorOrTensorsOrGradEdge = Union[ + torch.Tensor, Sequence[torch.Tensor], + "torch.autograd.graph.GradientEdge", + Sequence["torch.autograd.graph.GradientEdge"]] + +# In some cases, these basic types are shadowed by corresponding +# top-level values. The underscore variants let us refer to these +# types. See https://github.com/python/mypy/issues/4146 for why these +# workarounds is necessary +_int = builtins.int +_float = builtins.float +_bool = builtins.bool +_complex = builtins.complex + +_dtype = torch.dtype +_device = torch.device +_qscheme = torch.qscheme +_size = Union[torch.Size, List[_int], Tuple[_int, ...]] +_layout = torch.layout +_dispatchkey = Union[str, torch._C.DispatchKey] + +# Meta-type for "numeric" things; matches our docs +Number = Union[builtins.int, builtins.float, builtins.bool] + +# Meta-type for "device-like" things. Not to be confused with 'device' (a +# literal device object). This nomenclature is consistent with PythonArgParser. +# None means use the default device (typically CPU) +Device = Optional[Union[_device, str, _int]] +del Optional + +# Storage protocol implemented by ${Type}StorageBase classes + +class Storage: + _cdata: int + device: torch.device + dtype: torch.dtype + _torch_load_uninitialized: bool + + def __deepcopy__(self, memo) -> 'Storage': # type: ignore[empty-body] + ... + + def _new_shared(self, int) -> 'Storage': # type: ignore[empty-body] + ... 
+ + def _write_file(self, f: Any, is_real_file: _bool, save_size: _bool, element_size: int) -> None: + ... + + def element_size(self) -> int: # type: ignore[empty-body] + ... + + def is_shared(self) -> bool: # type: ignore[empty-body] + ... + + def share_memory_(self) -> 'Storage': # type: ignore[empty-body] + ... + + def nbytes(self) -> int: # type: ignore[empty-body] + ... + + def cpu(self) -> 'Storage': # type: ignore[empty-body] + ... + + def data_ptr(self) -> int: # type: ignore[empty-body] + ... + + def from_file(self, filename: str, shared: bool = False, nbytes: int = 0) -> 'Storage': # type: ignore[empty-body] + ... + + def _new_with_file(self, f: Any, element_size: int) -> 'Storage': # type: ignore[empty-body] + ... diff --git a/llmeval-env/lib/python3.10/site-packages/torch/version.py b/llmeval-env/lib/python3.10/site-packages/torch/version.py new file mode 100644 index 0000000000000000000000000000000000000000..9304464816939b49672ddc57a606634c398fbb71 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/version.py @@ -0,0 +1,8 @@ +from typing import Optional + +__all__ = ['__version__', 'debug', 'cuda', 'git_version', 'hip'] +__version__ = '2.3.0+cu121' +debug = False +cuda: Optional[str] = '12.1' +git_version = '97ff6cfd9c86c5c09d7ce775ab64ec5c99230f5d' +hip: Optional[str] = None diff --git a/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c2f84aeb06f7a520b7cf17bdd9c9c3854dc4c469 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE @@ -0,0 +1,15 @@ +Apache Software License 2.0 + +Copyright (c) 2020, Paul Ganssle (Google) + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE_APACHE b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE_APACHE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/LICENSE_APACHE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..5ca4be3198422cb791a135e09d1140b80cc511a5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/METADATA @@ -0,0 +1,33 @@ +Metadata-Version: 2.1 +Name: tzdata +Version: 2024.1 +Summary: Provider of IANA time zone data +Home-page: https://github.com/python/tzdata +Author: Python Software Foundation +Author-email: datetime-sig@python.org +License: Apache-2.0 +Project-URL: Bug Reports, https://github.com/python/tzdata/issues +Project-URL: Source, https://github.com/python/tzdata +Project-URL: Documentation, https://tzdata.readthedocs.io +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 3 +Requires-Python: >=2 +Description-Content-Type: text/x-rst +License-File: LICENSE +License-File: licenses/LICENSE_APACHE + +tzdata: Python package providing IANA time zone data +==================================================== + +This is a Python package containing ``zic``-compiled binaries for the IANA time +zone database. It is intended to be a fallback for systems that do not have +system time zone data installed (or don't have it installed in a standard +location), as a part of `PEP 615 `_ + +This repository generates a ``pip``-installable package, published on PyPI as +`tzdata `_. + +For more information, see `the documentation `_. diff --git a/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..b48ef410177c690efef9a31d64c89f09360cba8f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/RECORD @@ -0,0 +1,655 @@ +tzdata-2024.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tzdata-2024.1.dist-info/LICENSE,sha256=M-jlAC01EtP8wigrmV5rrZ0zR4G5xawxhD9ASQDh87Q,592 +tzdata-2024.1.dist-info/LICENSE_APACHE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 +tzdata-2024.1.dist-info/METADATA,sha256=opWqMTU2QGRjhy337uxoIBXNr84QcEx8I9-iisPqxmA,1393 +tzdata-2024.1.dist-info/RECORD,, +tzdata-2024.1.dist-info/WHEEL,sha256=-G_t0oGuE7UD0DrSpVZnq1hHMBV9DD2XkS5v7XpmTnk,110 +tzdata-2024.1.dist-info/top_level.txt,sha256=MO6QqC0xRrN67Gh9xU_nMmadwBVlYzPNkq_h4gYuzaQ,7 +tzdata/__init__.py,sha256=iofGPw33aJlVNgOXQP4kzxiXOEj8im69E8cgZZS874Q,252 +tzdata/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Africa/Abidjan,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Accra,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Addis_Ababa,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Africa/Algiers,sha256=L2nS4gLNFvuo89p3YtB-lSDYY2284SqkGH9pQQI8uwc,470 +tzdata/zoneinfo/Africa/Asmara,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Africa/Asmera,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Africa/Bamako,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Bangui,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 
+tzdata/zoneinfo/Africa/Banjul,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Bissau,sha256=wa3uva129dJHRCi7tYt04kFOn1-osMS2afMjleO9mDw,149 +tzdata/zoneinfo/Africa/Blantyre,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131 +tzdata/zoneinfo/Africa/Brazzaville,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 +tzdata/zoneinfo/Africa/Bujumbura,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131 +tzdata/zoneinfo/Africa/Cairo,sha256=icuaNiEvuC6TPc2fqhDv36lpop7IDDIGO7tFGMAz0b4,1309 +tzdata/zoneinfo/Africa/Casablanca,sha256=MMps8T4AwqbEN6PIN_pkNiPMBEBqtRZRZceLN-9rxMM,1919 +tzdata/zoneinfo/Africa/Ceuta,sha256=oEIgK53afz1SYxYB_D0jR98Ss3g581yb8TnLppPaYcY,562 +tzdata/zoneinfo/Africa/Conakry,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Dakar,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Dar_es_Salaam,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Africa/Djibouti,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Africa/Douala,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 +tzdata/zoneinfo/Africa/El_Aaiun,sha256=6hfLbLfrD1Qy9ZZqLXr1Xw7fzeEs_FqeHN2zZJZUVJI,1830 +tzdata/zoneinfo/Africa/Freetown,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Gaborone,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131 +tzdata/zoneinfo/Africa/Harare,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131 +tzdata/zoneinfo/Africa/Johannesburg,sha256=0Zrr4kNcToS_euZVM9I6nUQPmBYuW01pxz94PgIpnsg,190 +tzdata/zoneinfo/Africa/Juba,sha256=VTpoMAP-jJ6cKsDeNVr7l3LKGoKDUxGU2b1gqvDPz34,458 +tzdata/zoneinfo/Africa/Kampala,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Africa/Khartoum,sha256=NRwOwIg4SR6XuD11k3hxBz77uoBpzejXq7vxtq2Xys8,458 +tzdata/zoneinfo/Africa/Kigali,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131 +tzdata/zoneinfo/Africa/Kinshasa,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 +tzdata/zoneinfo/Africa/Lagos,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 +tzdata/zoneinfo/Africa/Libreville,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 +tzdata/zoneinfo/Africa/Lome,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Luanda,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 +tzdata/zoneinfo/Africa/Lubumbashi,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131 +tzdata/zoneinfo/Africa/Lusaka,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131 +tzdata/zoneinfo/Africa/Malabo,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 +tzdata/zoneinfo/Africa/Maputo,sha256=_UqXNoIwqJZ2yYd3lRCpkg_o2RH6BlSBU20QSM0PUp4,131 +tzdata/zoneinfo/Africa/Maseru,sha256=0Zrr4kNcToS_euZVM9I6nUQPmBYuW01pxz94PgIpnsg,190 +tzdata/zoneinfo/Africa/Mbabane,sha256=0Zrr4kNcToS_euZVM9I6nUQPmBYuW01pxz94PgIpnsg,190 +tzdata/zoneinfo/Africa/Mogadishu,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Africa/Monrovia,sha256=WM-JVfr502Vgy18Fe6iAJ2yMgOWbwwumIQh_yp53eKM,164 +tzdata/zoneinfo/Africa/Nairobi,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Africa/Ndjamena,sha256=Tlj4ZUUNJxEhvAoo7TJKqWv1J7tEYaf1FEMez-K9xEg,160 +tzdata/zoneinfo/Africa/Niamey,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 +tzdata/zoneinfo/Africa/Nouakchott,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Ouagadougou,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 
+tzdata/zoneinfo/Africa/Porto-Novo,sha256=5e8SiFccxWxSdsqWbhyKZ1xnR3JtdY7K_n7_zm7Ke-Q,180 +tzdata/zoneinfo/Africa/Sao_Tome,sha256=Pfiutakw5B5xr1OSg1uFvT0GwC6jVOqqxnx69GEJu50,173 +tzdata/zoneinfo/Africa/Timbuktu,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Africa/Tripoli,sha256=zzMBLZZh4VQ4_ARe5k4L_rsuqKP7edKvVt8F6kvj5FM,431 +tzdata/zoneinfo/Africa/Tunis,sha256=uoAEER48RJqNeGoYBuk5IeYqjc8sHvWLvKssuVCd18g,449 +tzdata/zoneinfo/Africa/Windhoek,sha256=g1jLRko_2peGsUTg0_wZycOC4gxTAHwfV2SO9I3KdCM,638 +tzdata/zoneinfo/Africa/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Africa/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/America/Adak,sha256=q_sZgOINX4TsX9iBx1gNd6XGwBnzCjg6qpdAQhK0ieA,969 +tzdata/zoneinfo/America/Anchorage,sha256=d8oMIpYvBpmLzl5I2By4ZaFEZsg_9dxgfqpIM0QFi_Y,977 +tzdata/zoneinfo/America/Anguilla,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Antigua,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Araguaina,sha256=TawYX4lVAxq0BxUGhTDx4C8vtBRnLuWi8qLV_oXDiUo,592 +tzdata/zoneinfo/America/Argentina/Buenos_Aires,sha256=IEVOpSfI6oiJJmFNIb9Vb0bOOMIgxO5bghFw7vkHFGk,708 +tzdata/zoneinfo/America/Argentina/Catamarca,sha256=UC0fxx7ZPmjPw3D0BK-5vap-c1cBzbgR293MdmEfOx0,708 +tzdata/zoneinfo/America/Argentina/ComodRivadavia,sha256=UC0fxx7ZPmjPw3D0BK-5vap-c1cBzbgR293MdmEfOx0,708 +tzdata/zoneinfo/America/Argentina/Cordoba,sha256=9Ij3WjT9mWMKQ43LeSUIqQuDb9zS3FSlHYPVNQJTFf0,708 +tzdata/zoneinfo/America/Argentina/Jujuy,sha256=7YpjOcmVaKKpiq31rQe8TTDNExdH9jjZIhdcZv-ShUg,690 +tzdata/zoneinfo/America/Argentina/La_Rioja,sha256=mUkRD5jaWJUy2f8vNFqOlMgKPptULOBn-vf_jMgF6x8,717 +tzdata/zoneinfo/America/Argentina/Mendoza,sha256=dL4q0zgY2FKPbG8cC-Wknnpp8tF2Y7SWgWSC_G_WznI,708 +tzdata/zoneinfo/America/Argentina/Rio_Gallegos,sha256=bCpWMlEI8KWe4c3n6fn8u6WCPnxjYtVy57ERtLTZaEs,708 +tzdata/zoneinfo/America/Argentina/Salta,sha256=H_ybxVycfOe7LlUA3GngoS0jENHkQURIRhjfJQF2kfU,690 +tzdata/zoneinfo/America/Argentina/San_Juan,sha256=Mj5vIUzQl5DtsPe3iMzS7rR-88U9HKW2csQqUda4JNM,717 +tzdata/zoneinfo/America/Argentina/San_Luis,sha256=rka8BokogyvMRFH6jr8D6s1tFIpsUeqHJ_feLK5O6ds,717 +tzdata/zoneinfo/America/Argentina/Tucuman,sha256=yv3aC-hALLio2yqneLIIylZhXKDlbPJGAd_abgsj9gg,726 +tzdata/zoneinfo/America/Argentina/Ushuaia,sha256=mcmZgB1pEHX6i7nlyRzjLnG8bqAtAK1TwMdRD2pZqBE,708 +tzdata/zoneinfo/America/Argentina/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/America/Argentina/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/America/Aruba,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Asuncion,sha256=PuuUl8VILSBeZWDyLkM67bWl47xPMcJ0fY-rAhvSFzc,884 +tzdata/zoneinfo/America/Atikokan,sha256=p41zBnujy9lPiiPf3WqotoyzOxhIS8F7TiDqGuwvCoE,149 +tzdata/zoneinfo/America/Atka,sha256=q_sZgOINX4TsX9iBx1gNd6XGwBnzCjg6qpdAQhK0ieA,969 +tzdata/zoneinfo/America/Bahia,sha256=_-ZFw-HzXc7byacHW_NJHtJ03ADFdqt1kaYgyWYobYw,682 +tzdata/zoneinfo/America/Bahia_Banderas,sha256=F2Tz2IIWs9nqdSb5sdKLrO6Cu0xiGLbQZ3TamKR4v5A,728 +tzdata/zoneinfo/America/Barbados,sha256=gdiJf9ZKOMs9QB4ex0-crvdmhNfHpNzXTV2xTaNDCAg,278 +tzdata/zoneinfo/America/Belem,sha256=w0jv-gdBbEBZQBF2z2liKpRM9CEOWA36O1qU1nJKeCs,394 +tzdata/zoneinfo/America/Belize,sha256=uYBPJqnCGnOOeKnoz1IG9POWTvXD5kUirpFuB0PHjVo,1045 +tzdata/zoneinfo/America/Blanc-Sablon,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 
+tzdata/zoneinfo/America/Boa_Vista,sha256=hYTFFNNZJdl_nSYIdfI8SQhtmfiakjCDI_15TlB-xEw,430 +tzdata/zoneinfo/America/Bogota,sha256=BqH6uClrrlT-VsBmke2Mh-IfA1R1l1h031CRUSLS1no,179 +tzdata/zoneinfo/America/Boise,sha256=Jt3omyPSPRoKE-KXVd-wxVON-CDE5oGaJA7Ar90Q2OM,999 +tzdata/zoneinfo/America/Buenos_Aires,sha256=IEVOpSfI6oiJJmFNIb9Vb0bOOMIgxO5bghFw7vkHFGk,708 +tzdata/zoneinfo/America/Cambridge_Bay,sha256=NFwNVfgxb2YMLzc-42RA-SKtNcODpukEfYf_QWWYTsI,883 +tzdata/zoneinfo/America/Campo_Grande,sha256=mngKYjaH_ENVmJ-mtURVjjFo5kHgLfYNPHZaCVSxQFE,952 +tzdata/zoneinfo/America/Cancun,sha256=XOYTJdVeHFfKeSGxHcZ_stJ9_Vkqn0q0LmS1mhnGI8o,529 +tzdata/zoneinfo/America/Caracas,sha256=UHmUwc0mFPoidR4UDCWb4T4w_mpCBsSb4BkW3SOKIVY,190 +tzdata/zoneinfo/America/Catamarca,sha256=UC0fxx7ZPmjPw3D0BK-5vap-c1cBzbgR293MdmEfOx0,708 +tzdata/zoneinfo/America/Cayenne,sha256=9URU4o1v5759UWuh8xI9vnaANOceOeRW67XoGQuuUa8,151 +tzdata/zoneinfo/America/Cayman,sha256=p41zBnujy9lPiiPf3WqotoyzOxhIS8F7TiDqGuwvCoE,149 +tzdata/zoneinfo/America/Chicago,sha256=wntzn_RqffBZThINcltDkhfhHkTqmlDNxJEwODtUguc,1754 +tzdata/zoneinfo/America/Chihuahua,sha256=hHey29pNZGuKh_bTiluGQSOGAhiQuCG4VMNGlJCgxPs,691 +tzdata/zoneinfo/America/Ciudad_Juarez,sha256=eJkqieD7ixtltRojAKRk4iNRk-bZZZDPQV2hyR1vMmI,718 +tzdata/zoneinfo/America/Coral_Harbour,sha256=p41zBnujy9lPiiPf3WqotoyzOxhIS8F7TiDqGuwvCoE,149 +tzdata/zoneinfo/America/Cordoba,sha256=9Ij3WjT9mWMKQ43LeSUIqQuDb9zS3FSlHYPVNQJTFf0,708 +tzdata/zoneinfo/America/Costa_Rica,sha256=ihoqA_tHmYm0YjTRLZu3q8PqsqqOeb1CELjWhPf_HXE,232 +tzdata/zoneinfo/America/Creston,sha256=rhFFPCHQiYTedfLv7ATckxeKe04jxeUvIJi4vUXMtUc,240 +tzdata/zoneinfo/America/Cuiaba,sha256=OaIle0Cr-BKe0hOik5rwdcoCbQ5LSHkHqBS2cLoCqAU,934 +tzdata/zoneinfo/America/Curacao,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Danmarkshavn,sha256=cQORuA8pR0vw3ZwYfeGkWaT1tPU66nMQ2xRKT1T1Yb4,447 +tzdata/zoneinfo/America/Dawson,sha256=BlKV0U36jqnlxM5-Pxn8OIiY5kJEcLlt3QZo-GsMzlY,1029 +tzdata/zoneinfo/America/Dawson_Creek,sha256=t4USMuIvq1VVL9gYCabraAYs31kmAqAnwf7GzEiJJNc,683 +tzdata/zoneinfo/America/Denver,sha256=m7cDkg7KS2EZ6BoQVYOk9soiBlHxO0GEeat81WxBPz4,1042 +tzdata/zoneinfo/America/Detroit,sha256=I4F8Mt9nx38AF6D-steYskBa_HHO6jKU1-W0yRFr50A,899 +tzdata/zoneinfo/America/Dominica,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Edmonton,sha256=Dq2mxcSNWZhMWRqxwwtMcaqwAIGMwkOzz-mW8fJscV8,970 +tzdata/zoneinfo/America/Eirunepe,sha256=6tKYaRpnbBSmXiwXy7_m4WW_rbVfn5LUec0keC3J7Iw,436 +tzdata/zoneinfo/America/El_Salvador,sha256=4wjsCpRH9AFk5abLAbnuv-zouhRKcwb0aenk-nWtmz0,176 +tzdata/zoneinfo/America/Ensenada,sha256=x2_eWDUWxIi5gKTGmM_d5V1HFt1-JN-j8dIpqj5Dn7M,1025 +tzdata/zoneinfo/America/Fort_Nelson,sha256=_j7IJ-hXHtV_7dSMg6pxGQLb6z_IaUMj3aJde_F49QQ,1448 +tzdata/zoneinfo/America/Fort_Wayne,sha256=5nj0KhPvvXvg8mqc5T4EscKKWC6rBWEcsBwWg2Qy8Hs,531 +tzdata/zoneinfo/America/Fortaleza,sha256=ugF4DWO3j_khONebf7CLsT9ldL-JOWey_69S0jl2LIA,484 +tzdata/zoneinfo/America/Glace_Bay,sha256=I1posPHAEfg_Lc_FQdX1B8F8_A0NeJnK72p36PE7pKM,880 +tzdata/zoneinfo/America/Godthab,sha256=LlGZ5Y_ud9JwWRvncHnUHRArQbbnNcmmrz3duMhR3Hc,965 +tzdata/zoneinfo/America/Goose_Bay,sha256=gCJA1Sk2ciUg2WInn8DmPBwRAw0FjQbYPaUJK80mtMI,1580 +tzdata/zoneinfo/America/Grand_Turk,sha256=Gp8hpMt9P3QoEHmsIX2bqGNMkUSvlwZqqNzccR-cbe8,853 +tzdata/zoneinfo/America/Grenada,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Guadeloupe,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 
+tzdata/zoneinfo/America/Guatemala,sha256=BGPGI4lyN6IFF_T0kx1q2lh3U5SEhbyDqLFuW8EFCaU,212 +tzdata/zoneinfo/America/Guayaquil,sha256=8OIaCy-SirKKz4I77l6MQFDgSLHtjN0TvklLVEZ_008,179 +tzdata/zoneinfo/America/Guyana,sha256=PmnEtWtOTamsPJXEo7PcNQCy2Rp-evGyJh4cf0pjAR4,181 +tzdata/zoneinfo/America/Halifax,sha256=kO5ahBM2oTLfWS4KX15FbKXfo5wg-f9vw1_hMOISGig,1672 +tzdata/zoneinfo/America/Havana,sha256=ms5rCuq2yBM49VmTymMtFQN3c5aBN1lkd8jjzKdnNm8,1117 +tzdata/zoneinfo/America/Hermosillo,sha256=W-QiSzPq2J-hWWQ-uzD6McLKzG8XPEawbJpnXlNp3-Q,286 +tzdata/zoneinfo/America/Indiana/Indianapolis,sha256=5nj0KhPvvXvg8mqc5T4EscKKWC6rBWEcsBwWg2Qy8Hs,531 +tzdata/zoneinfo/America/Indiana/Knox,sha256=KJCzXct8CTMItVLYLYeBqM6aT6b53gWCg6aDbsH58oI,1016 +tzdata/zoneinfo/America/Indiana/Marengo,sha256=ygWmq8sYee8NFwlSZyQ_tsKopFQMp9Ne557zGGbyF2Y,567 +tzdata/zoneinfo/America/Indiana/Petersburg,sha256=BIrubzHEp5QoyMaPgYbC1zSa_F3LwpXzKM8xH3rHspI,683 +tzdata/zoneinfo/America/Indiana/Tell_City,sha256=em2YMHDWEFXdZH0BKi5bLRAQ8bYDfop2T0Q8SqDh0B8,522 +tzdata/zoneinfo/America/Indiana/Vevay,sha256=dPk334e7MQwl71-avNyREBYVWuFTQcVKfltlRhrlRpw,369 +tzdata/zoneinfo/America/Indiana/Vincennes,sha256=jiODDXepmLP3gvCkBufdE3rp5cEXftBHnKne8_XOOCg,558 +tzdata/zoneinfo/America/Indiana/Winamac,sha256=hsEunaLrbxvspyV3Qm4UD7x7qOAeBtzcbbzANNMrdiw,603 +tzdata/zoneinfo/America/Indiana/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/America/Indiana/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/America/Indianapolis,sha256=5nj0KhPvvXvg8mqc5T4EscKKWC6rBWEcsBwWg2Qy8Hs,531 +tzdata/zoneinfo/America/Inuvik,sha256=d_ZX-USS70HIT-_PRJKMY6mbQRvbKLvsy9ar7uL2M40,817 +tzdata/zoneinfo/America/Iqaluit,sha256=nONS7zksGHTrbEJj73LYRZW964OncQuj_V6fNjpDoQ0,855 +tzdata/zoneinfo/America/Jamaica,sha256=pDexcAMzrv9TqLWGjVOHwIDcFMLT6Vqlzjb5AbNmkoQ,339 +tzdata/zoneinfo/America/Jujuy,sha256=7YpjOcmVaKKpiq31rQe8TTDNExdH9jjZIhdcZv-ShUg,690 +tzdata/zoneinfo/America/Juneau,sha256=V8IqRaJHSH7onK1gu3YYtW_a4VkNwjx5DCvQXpFdYAo,966 +tzdata/zoneinfo/America/Kentucky/Louisville,sha256=zS2SS573D9TmQZFWtSyRIVN3ZXVN_2FpVBbtqQFMzKU,1242 +tzdata/zoneinfo/America/Kentucky/Monticello,sha256=54or2oQ9bSbM9ifRoOjV7UjRF83jSSPuxfGeXH0nIqk,972 +tzdata/zoneinfo/America/Kentucky/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/America/Kentucky/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/America/Knox_IN,sha256=KJCzXct8CTMItVLYLYeBqM6aT6b53gWCg6aDbsH58oI,1016 +tzdata/zoneinfo/America/Kralendijk,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/La_Paz,sha256=2iYBxnc0HIwAzlx-Q3AI9Lb0GI87VY279oGcroBZSVs,170 +tzdata/zoneinfo/America/Lima,sha256=7vNjRhxzL-X4kyba-NkzXYNAOE-cqqcXvzXTqcTXBhY,283 +tzdata/zoneinfo/America/Los_Angeles,sha256=IA0FdU9tg6Nxz0CNcIUSV5dlezsL6-uh5QjP_oaj5cg,1294 +tzdata/zoneinfo/America/Louisville,sha256=zS2SS573D9TmQZFWtSyRIVN3ZXVN_2FpVBbtqQFMzKU,1242 +tzdata/zoneinfo/America/Lower_Princes,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Maceio,sha256=dSVg0dHedT9w1QO2F1AvWoel4_h8wmuYS4guEaL-5Kk,502 +tzdata/zoneinfo/America/Managua,sha256=ZYsoyN_GIlwAIpIj1spjQDPWGQ9kFZSipjUbO8caGfw,295 +tzdata/zoneinfo/America/Manaus,sha256=9kgrhpryB94YOVoshJliiiDSf9mwjb3OZwX0HusNRrk,412 +tzdata/zoneinfo/America/Marigot,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Martinique,sha256=m3rC6Mogc6cc1a9XJ8FPIYhZaSFNdYkxaZ-pfHhG3X4,178 +tzdata/zoneinfo/America/Matamoros,sha256=KxgAMGkE7TJuug9byFsT3KN836X3OyXq77v-tFpLVvc,437 
+tzdata/zoneinfo/America/Mazatlan,sha256=C5CBj73KgB8vbDbDEgqMHfPeMeglQj156WNbwYSxux8,718 +tzdata/zoneinfo/America/Mendoza,sha256=dL4q0zgY2FKPbG8cC-Wknnpp8tF2Y7SWgWSC_G_WznI,708 +tzdata/zoneinfo/America/Menominee,sha256=oUmJmzOZtChYrB9In-E1GqEVi2ogKjPESXlUySUGs94,917 +tzdata/zoneinfo/America/Merida,sha256=KTdHMhhdhJtTg40KW2qSfd6N9PAQ50d_ektYDt2ouy0,654 +tzdata/zoneinfo/America/Metlakatla,sha256=EVj1LkMCgry6mT8Ln_FpHxpJSU0oSncfbHGWIQ0SI_0,586 +tzdata/zoneinfo/America/Mexico_City,sha256=vhDy1hSceJyFa3bIqn2qRi1kgxtvrCCaaB7s65mljtY,773 +tzdata/zoneinfo/America/Miquelon,sha256=Eey-Id5b4HFODINweRFtbDjcgjs_myiC2UwsgYt4kVk,550 +tzdata/zoneinfo/America/Moncton,sha256=knrBNDFwHAGFr0nWJTBQ-10F_fZ5x4n3SnZtH-KI6h8,1493 +tzdata/zoneinfo/America/Monterrey,sha256=GWEQgKgJQV89hVpFOO6nS1AYvdM6Lcw_xeYwMfkV6bg,644 +tzdata/zoneinfo/America/Montevideo,sha256=l7FjW6qscGzdvfjlbIeZ5CQ_AFWS3ZeVDS5ppMJCNM0,969 +tzdata/zoneinfo/America/Montreal,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717 +tzdata/zoneinfo/America/Montserrat,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Nassau,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717 +tzdata/zoneinfo/America/New_York,sha256=1_IgazpFmJ_JrWPVWJIlMvpzUigNX4cXa_HbecsdH6k,1744 +tzdata/zoneinfo/America/Nipigon,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717 +tzdata/zoneinfo/America/Nome,sha256=_-incQnh0DwK9hJqFaYzO4osUKAUB2k2lae565sblpA,975 +tzdata/zoneinfo/America/Noronha,sha256=Q0r3GtA5y2RGkOj56OTZG5tuBy1B6kfbhyrJqCgf27g,484 +tzdata/zoneinfo/America/North_Dakota/Beulah,sha256=RvaBIS60bNNRmREi6BXSWEbJSrcP7J8Nmxg8OkBcrow,1043 +tzdata/zoneinfo/America/North_Dakota/Center,sha256=M09x4Mx6hcBAwktvwv16YvPRmsuDjZEDwHT0Umkcgyo,990 +tzdata/zoneinfo/America/North_Dakota/New_Salem,sha256=mZca9gyfO2USzax7v0mLJEYBKBVmIqylWqnfLgSsVys,990 +tzdata/zoneinfo/America/North_Dakota/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/America/North_Dakota/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/America/Nuuk,sha256=LlGZ5Y_ud9JwWRvncHnUHRArQbbnNcmmrz3duMhR3Hc,965 +tzdata/zoneinfo/America/Ojinaga,sha256=EMAldBXpY3Vgog_8yESXQb3qoS1v69jmWm0JPgs3k9U,718 +tzdata/zoneinfo/America/Panama,sha256=p41zBnujy9lPiiPf3WqotoyzOxhIS8F7TiDqGuwvCoE,149 +tzdata/zoneinfo/America/Pangnirtung,sha256=nONS7zksGHTrbEJj73LYRZW964OncQuj_V6fNjpDoQ0,855 +tzdata/zoneinfo/America/Paramaribo,sha256=C2v9tR6no54CRECWDFhANTl40UsA4AhHsdnGoNCb4_Q,187 +tzdata/zoneinfo/America/Phoenix,sha256=rhFFPCHQiYTedfLv7ATckxeKe04jxeUvIJi4vUXMtUc,240 +tzdata/zoneinfo/America/Port-au-Prince,sha256=wsS6VbQ__bKJ2IUMPy_Pao0CLRK5pXEBrqkaYuqs3Ns,565 +tzdata/zoneinfo/America/Port_of_Spain,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Porto_Acre,sha256=VjuQUr668phq5bcH40r94BPnZBKHzJf_MQBfM6Db96U,418 +tzdata/zoneinfo/America/Porto_Velho,sha256=9yPU8EXtKDQHLF745ETc9qZZ9Me2CK6jvgb6S53pSKg,394 +tzdata/zoneinfo/America/Puerto_Rico,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Punta_Arenas,sha256=2Aqh7bqo-mQlnMjURDkCOeEYmeXhkzKP7OxFAvhTjjA,1218 +tzdata/zoneinfo/America/Rainy_River,sha256=ANzwYGBU1PknQW4LR-H92i5c4Db95LU-UQhPhWZCjDo,1294 +tzdata/zoneinfo/America/Rankin_Inlet,sha256=JQCXQBdyc8uJTjIFO4jZuzS0OjG0gRHv8MPmdzN93CU,807 +tzdata/zoneinfo/America/Recife,sha256=3yZTwF3MJlkY0D48CQUTzCRwDCfGNq8EXXTZYlBgUTg,484 +tzdata/zoneinfo/America/Regina,sha256=_JHuns225iE-THc9NFp-RBq4PWULAuGw2OLbpOB_UMw,638 +tzdata/zoneinfo/America/Resolute,sha256=2UeJBR2ZSkn1bUZy0G0SEhBtY9vycwSRU4naK-sw044,807 
+tzdata/zoneinfo/America/Rio_Branco,sha256=VjuQUr668phq5bcH40r94BPnZBKHzJf_MQBfM6Db96U,418 +tzdata/zoneinfo/America/Rosario,sha256=9Ij3WjT9mWMKQ43LeSUIqQuDb9zS3FSlHYPVNQJTFf0,708 +tzdata/zoneinfo/America/Santa_Isabel,sha256=x2_eWDUWxIi5gKTGmM_d5V1HFt1-JN-j8dIpqj5Dn7M,1025 +tzdata/zoneinfo/America/Santarem,sha256=dDEGsnrm4wrzl4sK6K8PzEroBKD7A1V7HBa8cWW4cMk,409 +tzdata/zoneinfo/America/Santiago,sha256=_QBpU8K0QqLh5m2yqWfdkypIJDkPAc3dnIAc5jRQxxU,1354 +tzdata/zoneinfo/America/Santo_Domingo,sha256=xmJo59mZXN7Wnf-3Jjl37mCC-8GfN6xmk2l_vngyfeI,317 +tzdata/zoneinfo/America/Sao_Paulo,sha256=-izrIi8GXAKJ85l_8MVLoFp0pZm0Uihw-oapbiThiJE,952 +tzdata/zoneinfo/America/Scoresbysund,sha256=wrhIEVAFI29qKT3TdOWiiJwI80AohXwwfb1mCPSAXHo,984 +tzdata/zoneinfo/America/Shiprock,sha256=m7cDkg7KS2EZ6BoQVYOk9soiBlHxO0GEeat81WxBPz4,1042 +tzdata/zoneinfo/America/Sitka,sha256=pF5yln--MOzEMDacNd_Id0HX9pAmge8POfcxyTNh1-0,956 +tzdata/zoneinfo/America/St_Barthelemy,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/St_Johns,sha256=v99q_AFMPll5MMxMp98aqY40cmis2wciTfTqs2_kb0k,1878 +tzdata/zoneinfo/America/St_Kitts,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/St_Lucia,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/St_Thomas,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/St_Vincent,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Swift_Current,sha256=F-b65Yaax23CsuhSmeTDl6Tv9du4IsvWvMbbSuwHkLM,368 +tzdata/zoneinfo/America/Tegucigalpa,sha256=KlvqBJGswa9DIXlE3acU-pgd4IFqDeBRrUz02PmlNC0,194 +tzdata/zoneinfo/America/Thule,sha256=LzL5jdmZkxRkHdA3XkoqJPG_ImllnSRhYYLQpMf_TY8,455 +tzdata/zoneinfo/America/Thunder_Bay,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717 +tzdata/zoneinfo/America/Tijuana,sha256=x2_eWDUWxIi5gKTGmM_d5V1HFt1-JN-j8dIpqj5Dn7M,1025 +tzdata/zoneinfo/America/Toronto,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717 +tzdata/zoneinfo/America/Tortola,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Vancouver,sha256=Epou71sUffvHB1rd7wT0krvo3okXAV45_TWcOFpy26Q,1330 +tzdata/zoneinfo/America/Virgin,sha256=q76GKN1Uh8iJ24Fs46UHe7tH9rr6_rlBHZLW7y9wzo0,177 +tzdata/zoneinfo/America/Whitehorse,sha256=CyY4jNd0fzNSdf1HlYGfaktApmH71tRNRlpOEO32DGs,1029 +tzdata/zoneinfo/America/Winnipeg,sha256=ANzwYGBU1PknQW4LR-H92i5c4Db95LU-UQhPhWZCjDo,1294 +tzdata/zoneinfo/America/Yakutat,sha256=pvHLVNA1mI-H9fBDnlnpI6B9XzVFQeyvI9nyIkaFNYQ,946 +tzdata/zoneinfo/America/Yellowknife,sha256=Dq2mxcSNWZhMWRqxwwtMcaqwAIGMwkOzz-mW8fJscV8,970 +tzdata/zoneinfo/America/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/America/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Antarctica/Casey,sha256=1jc-FAjvkKnmCjhz8-yQgEKrN_sVmzAi8DVoy9_K8AQ,287 +tzdata/zoneinfo/Antarctica/Davis,sha256=Pom_267rsoZl6yLvYllu_SW1kixIrSPmsd-HLztn33Y,197 +tzdata/zoneinfo/Antarctica/DumontDUrville,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154 +tzdata/zoneinfo/Antarctica/Macquarie,sha256=aOZlIzIdTwevaTXoQkDlex2LSFDrg64GvRfcLnfCDAM,976 +tzdata/zoneinfo/Antarctica/Mawson,sha256=UYuiBSE0qZ-2kkBAa6Xq5g9NXg-W_R0P-rl2tlO0jHc,152 +tzdata/zoneinfo/Antarctica/McMurdo,sha256=Dgbn5VrtvJLvWz0Qbnw5KrFijP2KQosg6S6ZAooL-7k,1043 +tzdata/zoneinfo/Antarctica/Palmer,sha256=3MXfhQBaRB57_jqHZMl-M_K48NMFe4zALc7vaMyS5xw,887 +tzdata/zoneinfo/Antarctica/Rothera,sha256=XeddRL2YTDfEWzQI7nDqfW-Tfg-5EebxsHsMHyzGudI,132 
+tzdata/zoneinfo/Antarctica/South_Pole,sha256=Dgbn5VrtvJLvWz0Qbnw5KrFijP2KQosg6S6ZAooL-7k,1043 +tzdata/zoneinfo/Antarctica/Syowa,sha256=RoU-lCdq8u6o6GwvFSqHHAkt8ZXcUSc7j8cJH6pLRhw,133 +tzdata/zoneinfo/Antarctica/Troll,sha256=s4z0F_uKzx3biKjEzvHwb56132XRs6IR22fCQglW5GI,158 +tzdata/zoneinfo/Antarctica/Vostok,sha256=cDp-B4wKXE8U5b_zqJIlxdGY-AIAMCTJOZG3bRZBKNc,170 +tzdata/zoneinfo/Antarctica/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Antarctica/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Arctic/Longyearbyen,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705 +tzdata/zoneinfo/Arctic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Arctic/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Asia/Aden,sha256=RoU-lCdq8u6o6GwvFSqHHAkt8ZXcUSc7j8cJH6pLRhw,133 +tzdata/zoneinfo/Asia/Almaty,sha256=87WNMKCF7W2V6tq5LvX5DXWoi9MuwjCAY3f9dgwui4s,618 +tzdata/zoneinfo/Asia/Amman,sha256=KOnKO4_1XRlQvLG61GTbfKImSthwBHMSnzV1ExW8i5Q,928 +tzdata/zoneinfo/Asia/Anadyr,sha256=30bdZurg4Q__lCpH509TE0U7pOcEY6qxjvuPF9ai5yc,743 +tzdata/zoneinfo/Asia/Aqtau,sha256=bRj27vG5HvGegFg5eIKNmq3dfteYmr7KmTs4JFO-7SM,606 +tzdata/zoneinfo/Asia/Aqtobe,sha256=Pm7yI5cmfzx8CGXR2mQJDjtH12KCpx8ezFKchiJVVJ4,615 +tzdata/zoneinfo/Asia/Ashgabat,sha256=OTLHdQ8jFPDvxu_IwKX_c3W3jdN6e7FGoCSEEb0XKuw,375 +tzdata/zoneinfo/Asia/Ashkhabad,sha256=OTLHdQ8jFPDvxu_IwKX_c3W3jdN6e7FGoCSEEb0XKuw,375 +tzdata/zoneinfo/Asia/Atyrau,sha256=1YG4QzLxPRZQeGHiOrbm0cRs8ERTNg1NF9dWEwW2Pi0,616 +tzdata/zoneinfo/Asia/Baghdad,sha256=zFe6LXSfuoJjGsmYTMGjJtBcAMLiKFkD7j7-VaqKwH8,630 +tzdata/zoneinfo/Asia/Bahrain,sha256=YWDWV1o3HHWxnmwlzwMWC53C84ZYPkK_gYn9-P0Xx4U,152 +tzdata/zoneinfo/Asia/Baku,sha256=_Wh6ONaRatMc9lpwGO6zB9pTE38NZ4oWg4_-sZl17mA,744 +tzdata/zoneinfo/Asia/Bangkok,sha256=zcjiwoLYvJpenDyvL8Rf9OnlzRj13sjLhzNArXxYTWQ,152 +tzdata/zoneinfo/Asia/Barnaul,sha256=UGFYJYvtgYVS8Tqsqvj6p0OQCmN3zdY9wITWg8ODG-k,753 +tzdata/zoneinfo/Asia/Beirut,sha256=FgM4gqbWFp6KuUnVn-H8UIXZgTydBeOxDdbebJ0GpUc,732 +tzdata/zoneinfo/Asia/Bishkek,sha256=RXdxVxaiE5zxX5atQl-7ZesEeZVjsCXBGZ6cJbVU9pE,618 +tzdata/zoneinfo/Asia/Brunei,sha256=3ajgII3xZ-Wc-dqXRTSMw8qQRDSjXlSBIxyE_sDRGTk,320 +tzdata/zoneinfo/Asia/Calcutta,sha256=OgC9vhvElZ5ydWfHMLpRsDRV7NRV98GQxa0UOG63mw0,220 +tzdata/zoneinfo/Asia/Chita,sha256=1Lme3ccO47R5gmTe5VCq1BSb0m_1opWibq21zvZlntg,750 +tzdata/zoneinfo/Asia/Choibalsan,sha256=hsakX_o0anB6tNBNp_FKGx4k57IcODYubf1u2G_2Vqk,619 +tzdata/zoneinfo/Asia/Chongqing,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393 +tzdata/zoneinfo/Asia/Chungking,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393 +tzdata/zoneinfo/Asia/Colombo,sha256=QAyjK7gtXUWfLuju1M0H3_ew6iTM-bwfzO5obgvaHy8,247 +tzdata/zoneinfo/Asia/Dacca,sha256=rCGmEwbW4qkUU2QfTj5zLrydVCq8HTWl1dsqEDQOvvo,231 +tzdata/zoneinfo/Asia/Damascus,sha256=AtZTDRzHEB7QnKxFXvtWsNUI1cCCe27sAfpDfQd0MwY,1234 +tzdata/zoneinfo/Asia/Dhaka,sha256=rCGmEwbW4qkUU2QfTj5zLrydVCq8HTWl1dsqEDQOvvo,231 +tzdata/zoneinfo/Asia/Dili,sha256=ByL6yx7Cuq6axUp5D1n8a9MtmAod_mw6JQP_ltYdOUg,170 +tzdata/zoneinfo/Asia/Dubai,sha256=DZ6lBT6DGIAypvtNMB1dtoj0MBHltrH5F6EbcaDaexY,133 +tzdata/zoneinfo/Asia/Dushanbe,sha256=8qbn76rf9xu47NYVdfGvjnkf2KZxNN5J8ekFiXUz3AQ,366 +tzdata/zoneinfo/Asia/Famagusta,sha256=385fbaRnx-mdEaXqSyBKVBDDKPzCGKbynWYt75wwCug,940 +tzdata/zoneinfo/Asia/Gaza,sha256=-PC__gGODaDGgv5LLzH7ptNLbNdStPkxGY4LmebvcNU,2950 +tzdata/zoneinfo/Asia/Harbin,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393 
+tzdata/zoneinfo/Asia/Hebron,sha256=4FujfuE-ECIXgKW4pv0lxq2ZkAj7jDwt0rezuA0fFzg,2968 +tzdata/zoneinfo/Asia/Ho_Chi_Minh,sha256=R-ReVMreMcETG0Sifjfe5z-PgQpUsKjT6dVbEKzT3sE,236 +tzdata/zoneinfo/Asia/Hong_Kong,sha256=9AaPcyRtuXQX9zRnRTVkxX1mRs5JCbn6JTaSPvzX608,775 +tzdata/zoneinfo/Asia/Hovd,sha256=eqAvD2RfuIfSDhtqk58MECIjz5X14OHZ7aO4z14kndk,594 +tzdata/zoneinfo/Asia/Irkutsk,sha256=sWxp8g_aSfFan4ZyF9s6-pEX5Vgwxi_jNv7vwN06XIo,760 +tzdata/zoneinfo/Asia/Istanbul,sha256=KnFjsWuUgG9pmRNI59CmDEbrYbHwMF9fS4P2E9sQgG8,1200 +tzdata/zoneinfo/Asia/Jakarta,sha256=4qCZ6kix9xZriNIZsyb3xENz0IkJzZcjtENGlG_Wo4Q,248 +tzdata/zoneinfo/Asia/Jayapura,sha256=BUa0kX1iOdf0E-v7415h7l0lQv4DBCYX_3dAbYmQ0xU,171 +tzdata/zoneinfo/Asia/Jerusalem,sha256=n83o1YTeoFhfXIcnqvNfSKFJ4NvTqDv2zvi8qcFAIeM,1074 +tzdata/zoneinfo/Asia/Kabul,sha256=pNIwTfiSG71BGKvrhKqo1xdxckAx9vfcx5nJanrL81Q,159 +tzdata/zoneinfo/Asia/Kamchatka,sha256=Qix8x3s-m8UTeiwzNPBy_ZQvAzX_aaihz_PzLfTiUac,727 +tzdata/zoneinfo/Asia/Karachi,sha256=ujo4wv-3oa9tfrFT5jsLcEYcjeGeBRgG2QwdXg_ijU4,266 +tzdata/zoneinfo/Asia/Kashgar,sha256=hJyv03dhHML8K0GJGrY8b7M0OUkEXblh_RYmdZMxWtQ,133 +tzdata/zoneinfo/Asia/Kathmandu,sha256=drjxv-ByIxodnn-FATEOJ8DQgEjEj3Qihgtkd8FCxDg,161 +tzdata/zoneinfo/Asia/Katmandu,sha256=drjxv-ByIxodnn-FATEOJ8DQgEjEj3Qihgtkd8FCxDg,161 +tzdata/zoneinfo/Asia/Khandyga,sha256=fdEDOsDJkLuENybqIXtTiI4k2e24dKHDfBTww9AtbSw,775 +tzdata/zoneinfo/Asia/Kolkata,sha256=OgC9vhvElZ5ydWfHMLpRsDRV7NRV98GQxa0UOG63mw0,220 +tzdata/zoneinfo/Asia/Krasnoyarsk,sha256=buNI5S1g7eedK-PpnrLkBFFZDUyCtHxcxXDQGF2ARos,741 +tzdata/zoneinfo/Asia/Kuala_Lumpur,sha256=CVSy2aMB2U9DSAJGBqcbvLL6JNPNNwn1vIvKYFA5eF0,256 +tzdata/zoneinfo/Asia/Kuching,sha256=3ajgII3xZ-Wc-dqXRTSMw8qQRDSjXlSBIxyE_sDRGTk,320 +tzdata/zoneinfo/Asia/Kuwait,sha256=RoU-lCdq8u6o6GwvFSqHHAkt8ZXcUSc7j8cJH6pLRhw,133 +tzdata/zoneinfo/Asia/Macao,sha256=mr89i_wpMoWhAtqZrF2SGcoILcUw6rYrDkIUNADes7E,791 +tzdata/zoneinfo/Asia/Macau,sha256=mr89i_wpMoWhAtqZrF2SGcoILcUw6rYrDkIUNADes7E,791 +tzdata/zoneinfo/Asia/Magadan,sha256=wAufMGWL_s1Aw2l3myAfBFtrROVPes3dMoNuDEoNwT8,751 +tzdata/zoneinfo/Asia/Makassar,sha256=NV9j_RTuiU47mvJvfKE8daXH5AFYJ8Ki4gvHBJSxyLc,190 +tzdata/zoneinfo/Asia/Manila,sha256=Vk8aVoXR_edPDnARFdmEui4pq4Q3yNuiPUCzeIAPLBI,238 +tzdata/zoneinfo/Asia/Muscat,sha256=DZ6lBT6DGIAypvtNMB1dtoj0MBHltrH5F6EbcaDaexY,133 +tzdata/zoneinfo/Asia/Nicosia,sha256=TYYqWp8sK0AwBUHAp0wuuihZuQ19RXdt28bth33zOBI,597 +tzdata/zoneinfo/Asia/Novokuznetsk,sha256=aYW9rpcxpf_zrOZc2vmpcqgiuCRKMHB1lMrioI43KCw,726 +tzdata/zoneinfo/Asia/Novosibirsk,sha256=I2n4MCElad9sMcyJAAc4YdVT6ewbhR79OoAAuhEJfCY,753 +tzdata/zoneinfo/Asia/Omsk,sha256=y7u47EObB3wI8MxKHBRTFM-BEZZqhGpzDg7x5lcwJXY,741 +tzdata/zoneinfo/Asia/Oral,sha256=Q-Gf85NIvdAtU52Zkgf78rVHPlg85xyMe9Zm9ybh0po,625 +tzdata/zoneinfo/Asia/Phnom_Penh,sha256=zcjiwoLYvJpenDyvL8Rf9OnlzRj13sjLhzNArXxYTWQ,152 +tzdata/zoneinfo/Asia/Pontianak,sha256=o0x0jNTlwjiUqAzGX_HlzvCMru2zUURgQ4xzpS95xds,247 +tzdata/zoneinfo/Asia/Pyongyang,sha256=NxC5da8oTZ4StiFQnlhjlp9FTRuMM-Xwsq3Yg4y0xkA,183 +tzdata/zoneinfo/Asia/Qatar,sha256=YWDWV1o3HHWxnmwlzwMWC53C84ZYPkK_gYn9-P0Xx4U,152 +tzdata/zoneinfo/Asia/Qostanay,sha256=5tZkj1o0p4vaREsPO0YgIiw6eDf1cqO52x-0EMg_2L4,624 +tzdata/zoneinfo/Asia/Qyzylorda,sha256=JltKDEnuHmIQGYdFTAJMDDpdDA_HxjJOAHHaV7kFrlQ,624 +tzdata/zoneinfo/Asia/Rangoon,sha256=6J2DXIEdTaRKqLOGeCzogo3whaoO6PJWYamIHS8A6Qw,187 +tzdata/zoneinfo/Asia/Riyadh,sha256=RoU-lCdq8u6o6GwvFSqHHAkt8ZXcUSc7j8cJH6pLRhw,133 +tzdata/zoneinfo/Asia/Saigon,sha256=R-ReVMreMcETG0Sifjfe5z-PgQpUsKjT6dVbEKzT3sE,236 
+tzdata/zoneinfo/Asia/Sakhalin,sha256=M_TBd-03j-3Yc9KwhGEoBTwSJxWO1lPBG7ndst16PGo,755 +tzdata/zoneinfo/Asia/Samarkand,sha256=KZ_q-6GMDVgJb8RFqcrbVcPC0WLczolClC4nZA1HVNU,366 +tzdata/zoneinfo/Asia/Seoul,sha256=ZKcLb7zJtl52Lb0l64m29AwTcUbtyNvU0IHq-s2reN4,415 +tzdata/zoneinfo/Asia/Shanghai,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393 +tzdata/zoneinfo/Asia/Singapore,sha256=CVSy2aMB2U9DSAJGBqcbvLL6JNPNNwn1vIvKYFA5eF0,256 +tzdata/zoneinfo/Asia/Srednekolymsk,sha256=06mojetFbDd4ag1p8NK0Fg6rF2OOnZMFRRC90N2ATZc,742 +tzdata/zoneinfo/Asia/Taipei,sha256=oEwscvT3aoMXjQNt2X0VfuHzLkeORN2npcEJI2h-5s8,511 +tzdata/zoneinfo/Asia/Tashkent,sha256=0vpN2gI9GY50z1nea6zCPFf2B6VCu6XQQHx4l6rhnTI,366 +tzdata/zoneinfo/Asia/Tbilisi,sha256=ON_Uzv2VTSk6mRefNU-aI-qkqtCoUX6oECVqpeS42eI,629 +tzdata/zoneinfo/Asia/Tehran,sha256=ozLlhNXzpJCZx7bc-VpcmNdgdtn6lPtF6f9qkaDEycI,812 +tzdata/zoneinfo/Asia/Tel_Aviv,sha256=n83o1YTeoFhfXIcnqvNfSKFJ4NvTqDv2zvi8qcFAIeM,1074 +tzdata/zoneinfo/Asia/Thimbu,sha256=N6d_vfFvYORfMnr1fHJjYSt4DBORSbLi_2T-r2dJBnI,154 +tzdata/zoneinfo/Asia/Thimphu,sha256=N6d_vfFvYORfMnr1fHJjYSt4DBORSbLi_2T-r2dJBnI,154 +tzdata/zoneinfo/Asia/Tokyo,sha256=WaOHFDDw07k-YZ-jCkOkHR6IvdSf8m8J0PQFpQBwb5Y,213 +tzdata/zoneinfo/Asia/Tomsk,sha256=Bf7GoFTcUeP2hYyuYpruJji33tcEoLP-80o38A6i4zU,753 +tzdata/zoneinfo/Asia/Ujung_Pandang,sha256=NV9j_RTuiU47mvJvfKE8daXH5AFYJ8Ki4gvHBJSxyLc,190 +tzdata/zoneinfo/Asia/Ulaanbaatar,sha256=--I8P6_e4BtRIe3wCSkPtwHOu_k9rPsw-KqQKHJC9vM,594 +tzdata/zoneinfo/Asia/Ulan_Bator,sha256=--I8P6_e4BtRIe3wCSkPtwHOu_k9rPsw-KqQKHJC9vM,594 +tzdata/zoneinfo/Asia/Urumqi,sha256=hJyv03dhHML8K0GJGrY8b7M0OUkEXblh_RYmdZMxWtQ,133 +tzdata/zoneinfo/Asia/Ust-Nera,sha256=6NkuV1zOms-4qHQhq-cGc-cqEVgKHk7qd3MLDM-e2BA,771 +tzdata/zoneinfo/Asia/Vientiane,sha256=zcjiwoLYvJpenDyvL8Rf9OnlzRj13sjLhzNArXxYTWQ,152 +tzdata/zoneinfo/Asia/Vladivostok,sha256=zkOXuEDgpxX8HQGgDlh9SbAQzHOaNxX2XSI6Y4gMD-k,742 +tzdata/zoneinfo/Asia/Yakutsk,sha256=xD6zA4E228dC1mIUQ7cMO-9LORSfE-Fok0awGDG6juk,741 +tzdata/zoneinfo/Asia/Yangon,sha256=6J2DXIEdTaRKqLOGeCzogo3whaoO6PJWYamIHS8A6Qw,187 +tzdata/zoneinfo/Asia/Yekaterinburg,sha256=q17eUyqOEK2LJYKXYLCJqylj-vmaCG2vSNMttqrQTRk,760 +tzdata/zoneinfo/Asia/Yerevan,sha256=pLEBdchA8H9l-9hdA6FjHmwaj5T1jupK0u-bor1KKa0,708 +tzdata/zoneinfo/Asia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Asia/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Atlantic/Azores,sha256=KmvA_G-yNl76C0A17JdtFg7ju9LHa5JIWh15GOzLxds,1453 +tzdata/zoneinfo/Atlantic/Bermuda,sha256=PuxqD2cD99Pzjb8hH99Dws053d_zXnZHjeH0kZ8LSLI,1024 +tzdata/zoneinfo/Atlantic/Canary,sha256=XMmxBlscPIWXhiauKy_d5bxX4xjNMM-5Vw84FwZkT00,478 +tzdata/zoneinfo/Atlantic/Cape_Verde,sha256=E5ss6xpIpD0g_VEDsFMFi-ltsebp98PBSpULoVxIAyU,175 +tzdata/zoneinfo/Atlantic/Faeroe,sha256=Iw0qB0mBuviH5w3Qy8jaxCOes07ZHh2wkW8MPUWJqj0,441 +tzdata/zoneinfo/Atlantic/Faroe,sha256=Iw0qB0mBuviH5w3Qy8jaxCOes07ZHh2wkW8MPUWJqj0,441 +tzdata/zoneinfo/Atlantic/Jan_Mayen,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705 +tzdata/zoneinfo/Atlantic/Madeira,sha256=IX1jlaiB-DaaGwjnfc5pYr8eEtX7_Wol-T50QNAs3qw,1453 +tzdata/zoneinfo/Atlantic/Reykjavik,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Atlantic/South_Georgia,sha256=kPGfCLQD2C6_Xc5TyAmqmXP-GYdLLPucpBn3S7ybWu8,132 +tzdata/zoneinfo/Atlantic/St_Helena,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Atlantic/Stanley,sha256=QqQd8IWklNapMKjN5vF7vvVn4K-yl3VKvM5zkCKabCM,789 
+tzdata/zoneinfo/Atlantic/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Atlantic/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Australia/ACT,sha256=gg1FqGioj4HHMdWyx1i07QAAObYmCoBDP44PCUpgS1k,904 +tzdata/zoneinfo/Australia/Adelaide,sha256=Gk1SdGRVmB233I-WETXAMCZz7L7HVzoN4aUoIcgNr3g,921 +tzdata/zoneinfo/Australia/Brisbane,sha256=2kVWz9CI_qtfdb55g0iL59gUBC7lnO3GUalIQxtHADY,289 +tzdata/zoneinfo/Australia/Broken_Hill,sha256=dzk9LvGA_xRStnAIjAFuTJ8Uwz_s7qGWGQmiXPgDsLY,941 +tzdata/zoneinfo/Australia/Canberra,sha256=gg1FqGioj4HHMdWyx1i07QAAObYmCoBDP44PCUpgS1k,904 +tzdata/zoneinfo/Australia/Currie,sha256=1IAVgf0AA3sBPXFhaxGfu9UQ_cpd4GNpsQ9xio2l4y0,1003 +tzdata/zoneinfo/Australia/Darwin,sha256=ZoexbhgdUlV4leV-dhBu6AxDVkJy43xrPb9UQ3EQCdI,234 +tzdata/zoneinfo/Australia/Eucla,sha256=3NqsFfMzR6-lSUPViNXBAOyJPqyokisse7uDXurURpk,314 +tzdata/zoneinfo/Australia/Hobart,sha256=1IAVgf0AA3sBPXFhaxGfu9UQ_cpd4GNpsQ9xio2l4y0,1003 +tzdata/zoneinfo/Australia/LHI,sha256=82i9JWWcApPQK7eex9rH1bc6kt_6_OFLTdL_uLoRqto,692 +tzdata/zoneinfo/Australia/Lindeman,sha256=iHkCc0QJ7iaQffiTTXQVJ2swsC7QJxLUMHQOGCFlkTk,325 +tzdata/zoneinfo/Australia/Lord_Howe,sha256=82i9JWWcApPQK7eex9rH1bc6kt_6_OFLTdL_uLoRqto,692 +tzdata/zoneinfo/Australia/Melbourne,sha256=X7JPMEj_SYWyfgWFMkp6FOmT6GfyjR-lF9hFGgTavnE,904 +tzdata/zoneinfo/Australia/NSW,sha256=gg1FqGioj4HHMdWyx1i07QAAObYmCoBDP44PCUpgS1k,904 +tzdata/zoneinfo/Australia/North,sha256=ZoexbhgdUlV4leV-dhBu6AxDVkJy43xrPb9UQ3EQCdI,234 +tzdata/zoneinfo/Australia/Perth,sha256=ZsuelcBC1YfWugH2CrlOXQcSDD4gGUJCobB1W-aupHo,306 +tzdata/zoneinfo/Australia/Queensland,sha256=2kVWz9CI_qtfdb55g0iL59gUBC7lnO3GUalIQxtHADY,289 +tzdata/zoneinfo/Australia/South,sha256=Gk1SdGRVmB233I-WETXAMCZz7L7HVzoN4aUoIcgNr3g,921 +tzdata/zoneinfo/Australia/Sydney,sha256=gg1FqGioj4HHMdWyx1i07QAAObYmCoBDP44PCUpgS1k,904 +tzdata/zoneinfo/Australia/Tasmania,sha256=1IAVgf0AA3sBPXFhaxGfu9UQ_cpd4GNpsQ9xio2l4y0,1003 +tzdata/zoneinfo/Australia/Victoria,sha256=X7JPMEj_SYWyfgWFMkp6FOmT6GfyjR-lF9hFGgTavnE,904 +tzdata/zoneinfo/Australia/West,sha256=ZsuelcBC1YfWugH2CrlOXQcSDD4gGUJCobB1W-aupHo,306 +tzdata/zoneinfo/Australia/Yancowinna,sha256=dzk9LvGA_xRStnAIjAFuTJ8Uwz_s7qGWGQmiXPgDsLY,941 +tzdata/zoneinfo/Australia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Australia/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Brazil/Acre,sha256=VjuQUr668phq5bcH40r94BPnZBKHzJf_MQBfM6Db96U,418 +tzdata/zoneinfo/Brazil/DeNoronha,sha256=Q0r3GtA5y2RGkOj56OTZG5tuBy1B6kfbhyrJqCgf27g,484 +tzdata/zoneinfo/Brazil/East,sha256=-izrIi8GXAKJ85l_8MVLoFp0pZm0Uihw-oapbiThiJE,952 +tzdata/zoneinfo/Brazil/West,sha256=9kgrhpryB94YOVoshJliiiDSf9mwjb3OZwX0HusNRrk,412 +tzdata/zoneinfo/Brazil/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Brazil/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/CET,sha256=9q70fJErxHX0_hfgu5Wk0oH5ZZLUWhBIHJI1z7gHgBI,621 +tzdata/zoneinfo/CST6CDT,sha256=ajbQjR1ESk2m3dg1sAR2slqafjcfIhw-SC4SC6F7VBY,951 +tzdata/zoneinfo/Canada/Atlantic,sha256=kO5ahBM2oTLfWS4KX15FbKXfo5wg-f9vw1_hMOISGig,1672 +tzdata/zoneinfo/Canada/Central,sha256=ANzwYGBU1PknQW4LR-H92i5c4Db95LU-UQhPhWZCjDo,1294 +tzdata/zoneinfo/Canada/Eastern,sha256=gVq023obEpKGfS-SS3GOG7oyRVzp-SIF2y_rZQKcZ2E,1717 +tzdata/zoneinfo/Canada/Mountain,sha256=Dq2mxcSNWZhMWRqxwwtMcaqwAIGMwkOzz-mW8fJscV8,970 +tzdata/zoneinfo/Canada/Newfoundland,sha256=v99q_AFMPll5MMxMp98aqY40cmis2wciTfTqs2_kb0k,1878 
+tzdata/zoneinfo/Canada/Pacific,sha256=Epou71sUffvHB1rd7wT0krvo3okXAV45_TWcOFpy26Q,1330 +tzdata/zoneinfo/Canada/Saskatchewan,sha256=_JHuns225iE-THc9NFp-RBq4PWULAuGw2OLbpOB_UMw,638 +tzdata/zoneinfo/Canada/Yukon,sha256=CyY4jNd0fzNSdf1HlYGfaktApmH71tRNRlpOEO32DGs,1029 +tzdata/zoneinfo/Canada/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Canada/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Chile/Continental,sha256=_QBpU8K0QqLh5m2yqWfdkypIJDkPAc3dnIAc5jRQxxU,1354 +tzdata/zoneinfo/Chile/EasterIsland,sha256=EwVM74XjsboPVxK9bWmdd4nTrtvasP1zlLdxrMB_YaE,1174 +tzdata/zoneinfo/Chile/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Chile/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Cuba,sha256=ms5rCuq2yBM49VmTymMtFQN3c5aBN1lkd8jjzKdnNm8,1117 +tzdata/zoneinfo/EET,sha256=ftIfVTZNlKejEciANKFFxES2uv_Z4rTAgyjwvk1lLpE,497 +tzdata/zoneinfo/EST,sha256=Eqcp0sCDGh_NPbcYAbBhmUob540rIs8FUnkmkZDQ0go,111 +tzdata/zoneinfo/EST5EDT,sha256=RAPR1jPCcVa5nvibF24lGKApc2bRw3Y87RbesyI3BP4,951 +tzdata/zoneinfo/Egypt,sha256=icuaNiEvuC6TPc2fqhDv36lpop7IDDIGO7tFGMAz0b4,1309 +tzdata/zoneinfo/Eire,sha256=EcADNuAvExj-dkqylGfF8q_vv_-mRPqN0k9bCDtJW3E,1496 +tzdata/zoneinfo/Etc/GMT,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 +tzdata/zoneinfo/Etc/GMT+0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 +tzdata/zoneinfo/Etc/GMT+1,sha256=5L9o8TEUgtB11poIag85vRdq08LMDZmZ6DPn7UqPL_g,113 +tzdata/zoneinfo/Etc/GMT+10,sha256=IvBxiqQU76qzNbuxRo8Ah9rPQSRGQGKp_SRs5u1PPkM,114 +tzdata/zoneinfo/Etc/GMT+11,sha256=9MfFpFp_rt9PksMjQ23VOlir3hzTlnLz_5V2tfonhbU,114 +tzdata/zoneinfo/Etc/GMT+12,sha256=l26XCFp9IbgXGvMw7NHgHzIZbHry2B5qGYfhMDHFVrw,114 +tzdata/zoneinfo/Etc/GMT+2,sha256=YbbqH7B6jNoQEIjyV4-8a2cXD9lGC3vQKnEkY2ucDGI,113 +tzdata/zoneinfo/Etc/GMT+3,sha256=q3D9DLfmTBUAo4YMnNUNUUKrAkKSwM5Q-vesd9A6SZQ,113 +tzdata/zoneinfo/Etc/GMT+4,sha256=UghKME3laXSDZ7q74YDb4FcLnzNqXQydcZpQHvssP2k,113 +tzdata/zoneinfo/Etc/GMT+5,sha256=TZ5qaoELlszW_Z5FdqAEMKk8Y_xu5XhZBNZUco55SrM,113 +tzdata/zoneinfo/Etc/GMT+6,sha256=_2k3LZ5x8hVjMwwmCx6GqUwW-v1IvOkBrJjYH5bD6Qw,113 +tzdata/zoneinfo/Etc/GMT+7,sha256=Di8J430WGr98Ww95tdfIo8hGxkVQfJvlx55ansDuoeQ,113 +tzdata/zoneinfo/Etc/GMT+8,sha256=OIIlUFhZwL2ctx3fxINbY2HDDAmSQ7i2ZAUgX7Exjgw,113 +tzdata/zoneinfo/Etc/GMT+9,sha256=1vpkIoPqBiwDWzH-fLFxwNbmdKRY7mqdiJhYQImVxaw,113 +tzdata/zoneinfo/Etc/GMT-0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 +tzdata/zoneinfo/Etc/GMT-1,sha256=S81S9Z0-V-0B5U-0S0Pnbx8fv2iHtwE1LrlZk-ckLto,114 +tzdata/zoneinfo/Etc/GMT-10,sha256=VvdG5IpXB_xJX4omzfrrHblkRUzkbCZXPhTrLngc7vk,115 +tzdata/zoneinfo/Etc/GMT-11,sha256=2sYLfVuDFSy7Kc1WOPiY1EqquHw5Xx4HbDA1QOL1hc4,115 +tzdata/zoneinfo/Etc/GMT-12,sha256=ifHVhk5fczZG3GDy_Nv7YsLNaxf8stB4MrzgWUCINlU,115 +tzdata/zoneinfo/Etc/GMT-13,sha256=CMkORdXsaSyL-4N0n37Cyc1lCr22ZsWyug9_QZVe0E0,115 +tzdata/zoneinfo/Etc/GMT-14,sha256=NK07ElwueU0OP8gORtcXUUug_3v4d04uxfVHMUnLM9U,115 +tzdata/zoneinfo/Etc/GMT-2,sha256=QMToMLcif1S4SNPOMxMtBLqc1skUYnIhbUAjKEdAf9w,114 +tzdata/zoneinfo/Etc/GMT-3,sha256=10GMvfulaJwDQiHiWEJiU_YURyjDfPcl5ugnYBugN3E,114 +tzdata/zoneinfo/Etc/GMT-4,sha256=c6Kx3v41GRkrvky8k71db_UJbpyyp2OZCsjDSvjkr6s,114 +tzdata/zoneinfo/Etc/GMT-5,sha256=94TvO8e_8t52bs8ry70nAquvgK8qJKQTI7lQnVCHX-U,114 +tzdata/zoneinfo/Etc/GMT-6,sha256=3fH8eX--0iDijmYAQHQ0IUXheezaj6-aadZsQNAB4fE,114 +tzdata/zoneinfo/Etc/GMT-7,sha256=DnsTJ3NUYYGLUwFb_L15U_GbaMF-acLVsPyTNySyH-M,114 +tzdata/zoneinfo/Etc/GMT-8,sha256=kvGQUwONDBG7nhEp_wESc4xl4xNXiXEivxAv09nkr_g,114 
+tzdata/zoneinfo/Etc/GMT-9,sha256=U1WRFGWQAW91JXK99gY1K9d0rFZYDWHzDUR3z71Lh6Y,114 +tzdata/zoneinfo/Etc/GMT0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 +tzdata/zoneinfo/Etc/Greenwich,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 +tzdata/zoneinfo/Etc/UCT,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111 +tzdata/zoneinfo/Etc/UTC,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111 +tzdata/zoneinfo/Etc/Universal,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111 +tzdata/zoneinfo/Etc/Zulu,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111 +tzdata/zoneinfo/Etc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Etc/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Europe/Amsterdam,sha256=sQ-VQqhQnwpj68p449gEMt2GuOopZAAoD-vZz6dugog,1103 +tzdata/zoneinfo/Europe/Andorra,sha256=leuTyE4uduIBX0aHb_7PK_KlslpWSyS6e0SS84hKFrE,389 +tzdata/zoneinfo/Europe/Astrakhan,sha256=P3E5UDgQ4gqsMi-KdMAWwOSStogdcNl9rLMVUdpFLXI,726 +tzdata/zoneinfo/Europe/Athens,sha256=8f1niwVI4ymziTT2KBJV5pjfp2GtH_hB9sy3lgbGE0U,682 +tzdata/zoneinfo/Europe/Belfast,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599 +tzdata/zoneinfo/Europe/Belgrade,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478 +tzdata/zoneinfo/Europe/Berlin,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705 +tzdata/zoneinfo/Europe/Bratislava,sha256=pukw4zdc3LUffYp0iFr_if0UuGHrt1yzOdD5HBbBRpo,723 +tzdata/zoneinfo/Europe/Brussels,sha256=sQ-VQqhQnwpj68p449gEMt2GuOopZAAoD-vZz6dugog,1103 +tzdata/zoneinfo/Europe/Bucharest,sha256=iY74H96aaTMJvmqAhzUoSI8SjZUtPvv4PGF4ClwFm6U,661 +tzdata/zoneinfo/Europe/Budapest,sha256=qNr-valoDI1mevuQXqOMkOhIcT194EczOKIijxrDMV8,766 +tzdata/zoneinfo/Europe/Busingen,sha256=GZBiscMM_rI3XshMVt9SvlGJGYamKTt6Ek06YlCfRek,497 +tzdata/zoneinfo/Europe/Chisinau,sha256=5TPhkCtxxa0ByLCv7YxOrc5Vtdui2v2VX8vrSopPkPs,755 +tzdata/zoneinfo/Europe/Copenhagen,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705 +tzdata/zoneinfo/Europe/Dublin,sha256=EcADNuAvExj-dkqylGfF8q_vv_-mRPqN0k9bCDtJW3E,1496 +tzdata/zoneinfo/Europe/Gibraltar,sha256=t1hglDTLUIFqs91nY5lulN7oxkoAXHnh0zjyaKG2bG8,1220 +tzdata/zoneinfo/Europe/Guernsey,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599 +tzdata/zoneinfo/Europe/Helsinki,sha256=ccpK9ZmPCZkMXoddNQ_DyONPKAuub-FPNtRpL6znpWM,481 +tzdata/zoneinfo/Europe/Isle_of_Man,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599 +tzdata/zoneinfo/Europe/Istanbul,sha256=KnFjsWuUgG9pmRNI59CmDEbrYbHwMF9fS4P2E9sQgG8,1200 +tzdata/zoneinfo/Europe/Jersey,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599 +tzdata/zoneinfo/Europe/Kaliningrad,sha256=57ov9G8m25w1pPdJF8zoFWzq5I6UoBMVsk2eHPelbA8,904 +tzdata/zoneinfo/Europe/Kiev,sha256=BYnoDd7Ov50wd4mMEpddK-c5PfKFbumSbFNHY-Hia_I,558 +tzdata/zoneinfo/Europe/Kirov,sha256=KqXGcIbMGTuOoKZYBG-5bj7kVzFbKyGMA99PA0414D0,735 +tzdata/zoneinfo/Europe/Kyiv,sha256=BYnoDd7Ov50wd4mMEpddK-c5PfKFbumSbFNHY-Hia_I,558 +tzdata/zoneinfo/Europe/Lisbon,sha256=Nr-w4MM_s8Zhwdu1D4cNOQiTZMwZibYswSH1nB1GUKg,1454 +tzdata/zoneinfo/Europe/Ljubljana,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478 +tzdata/zoneinfo/Europe/London,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599 +tzdata/zoneinfo/Europe/Luxembourg,sha256=sQ-VQqhQnwpj68p449gEMt2GuOopZAAoD-vZz6dugog,1103 +tzdata/zoneinfo/Europe/Madrid,sha256=ylsyHdv8iOB-DQPtL6DIMs5dDdjn2QolIAqOJImMOyE,897 +tzdata/zoneinfo/Europe/Malta,sha256=irX_nDD-BXYObaduu_vhPe1F31xmgL364dSOaT_OVco,928 +tzdata/zoneinfo/Europe/Mariehamn,sha256=ccpK9ZmPCZkMXoddNQ_DyONPKAuub-FPNtRpL6znpWM,481 
+tzdata/zoneinfo/Europe/Minsk,sha256=86iP_xDtidkUCqjkoKhH5_El3VI21fSgoIiXl_BzUaU,808 +tzdata/zoneinfo/Europe/Monaco,sha256=zViOd5xXN9cOTkcVja-reUWwJrK7NEVMxHdBgVRZsGg,1105 +tzdata/zoneinfo/Europe/Moscow,sha256=7S4KCZ-0RrJBZoNDjT9W-fxaYqFsdUmn9Zy8k1s2TIo,908 +tzdata/zoneinfo/Europe/Nicosia,sha256=TYYqWp8sK0AwBUHAp0wuuihZuQ19RXdt28bth33zOBI,597 +tzdata/zoneinfo/Europe/Oslo,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705 +tzdata/zoneinfo/Europe/Paris,sha256=zViOd5xXN9cOTkcVja-reUWwJrK7NEVMxHdBgVRZsGg,1105 +tzdata/zoneinfo/Europe/Podgorica,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478 +tzdata/zoneinfo/Europe/Prague,sha256=pukw4zdc3LUffYp0iFr_if0UuGHrt1yzOdD5HBbBRpo,723 +tzdata/zoneinfo/Europe/Riga,sha256=PU8amev-8XVvl4B_JUOOOM1ofSMbotp-3MPGPHpPoTw,694 +tzdata/zoneinfo/Europe/Rome,sha256=hr0moG_jBXs2zyndejOPJSSv-BFu8I0AWqIRTqYSKGk,947 +tzdata/zoneinfo/Europe/Samara,sha256=Vc60AJe-0-b8prNiFwZTUS1bCbWxxuEnnNcgp8YkQRY,732 +tzdata/zoneinfo/Europe/San_Marino,sha256=hr0moG_jBXs2zyndejOPJSSv-BFu8I0AWqIRTqYSKGk,947 +tzdata/zoneinfo/Europe/Sarajevo,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478 +tzdata/zoneinfo/Europe/Saratov,sha256=0fN3eVFVewG-DSVk9xJABDQB1S_Nyn37bHOjj5X8Bm0,726 +tzdata/zoneinfo/Europe/Simferopol,sha256=y2Nybf9LGVNqNdW_GPS-NIDRLriyH_pyxKpT0zmATK4,865 +tzdata/zoneinfo/Europe/Skopje,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478 +tzdata/zoneinfo/Europe/Sofia,sha256=LQjC-OJkL4TzZcqD-JUofDAg1-qJui_2Ri6Eoii2MuQ,592 +tzdata/zoneinfo/Europe/Stockholm,sha256=p_2ZMteF1NaQkAuDTDVjwYEMHPLgFxG8wJJq9sB2fLc,705 +tzdata/zoneinfo/Europe/Tallinn,sha256=R6yRfPqESOYQWftlncDWo_fQak61eeiEQKwg_C-C7W8,675 +tzdata/zoneinfo/Europe/Tirane,sha256=I-alATWRd8mfSgvnr3dN_F9vbTB66alvz2GQo0LUbPc,604 +tzdata/zoneinfo/Europe/Tiraspol,sha256=5TPhkCtxxa0ByLCv7YxOrc5Vtdui2v2VX8vrSopPkPs,755 +tzdata/zoneinfo/Europe/Ulyanovsk,sha256=2vK0XahtB_dKjDDXccjMjbQ2bAOfKDe66uMDqtjzHm4,760 +tzdata/zoneinfo/Europe/Uzhgorod,sha256=BYnoDd7Ov50wd4mMEpddK-c5PfKFbumSbFNHY-Hia_I,558 +tzdata/zoneinfo/Europe/Vaduz,sha256=GZBiscMM_rI3XshMVt9SvlGJGYamKTt6Ek06YlCfRek,497 +tzdata/zoneinfo/Europe/Vatican,sha256=hr0moG_jBXs2zyndejOPJSSv-BFu8I0AWqIRTqYSKGk,947 +tzdata/zoneinfo/Europe/Vienna,sha256=q8_UF23-KHqc2ay4ju0qT1TuBSpRTnlB7i6vElk4eJw,658 +tzdata/zoneinfo/Europe/Vilnius,sha256=hXvv1PaQndapT7hdywPO3738Y3ZqbW_hJx87khyaOPM,676 +tzdata/zoneinfo/Europe/Volgograd,sha256=v3P6iFJ-rThJprVNDxB7ZYDrimtsW7IvQi_gJpZiJOQ,753 +tzdata/zoneinfo/Europe/Warsaw,sha256=6I9aUfFoFXpBrC3YpO4OmoeUGchMYSK0dxsaKjPZOkw,923 +tzdata/zoneinfo/Europe/Zagreb,sha256=qMlk8-qnognZplD7FsaMAD6aX8Yv-7sQ-oSdVPs2YtY,478 +tzdata/zoneinfo/Europe/Zaporozhye,sha256=BYnoDd7Ov50wd4mMEpddK-c5PfKFbumSbFNHY-Hia_I,558 +tzdata/zoneinfo/Europe/Zurich,sha256=GZBiscMM_rI3XshMVt9SvlGJGYamKTt6Ek06YlCfRek,497 +tzdata/zoneinfo/Europe/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Europe/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Factory,sha256=0ytXntCnQnMWvqJgue4mdUUQRr1YxXxnnCTyZxhgr3Y,113 +tzdata/zoneinfo/GB,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599 +tzdata/zoneinfo/GB-Eire,sha256=Z2VB8LitRXx0TAk_gHWJrcrZCeP9A_kBeH0IeG7tvTM,1599 +tzdata/zoneinfo/GMT,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 +tzdata/zoneinfo/GMT+0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 +tzdata/zoneinfo/GMT-0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 +tzdata/zoneinfo/GMT0,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 
+tzdata/zoneinfo/Greenwich,sha256=3EoHVxsQiE5PTzRQydGhy_TAPvU9Bu0uTqFS2eul1dc,111 +tzdata/zoneinfo/HST,sha256=up2TB-9E2uBD6IGaCSOnR96o_DENUVI9ZCE1zQS0SzY,112 +tzdata/zoneinfo/Hongkong,sha256=9AaPcyRtuXQX9zRnRTVkxX1mRs5JCbn6JTaSPvzX608,775 +tzdata/zoneinfo/Iceland,sha256=8-f8qg6YQP9BadNWfY-1kmZEhI9JY9es-SMghDxdSG4,130 +tzdata/zoneinfo/Indian/Antananarivo,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Indian/Chagos,sha256=J_aS7rs0ZG1dPTGeokXxNJpF4Pds8u1ct49cRtX7giY,152 +tzdata/zoneinfo/Indian/Christmas,sha256=zcjiwoLYvJpenDyvL8Rf9OnlzRj13sjLhzNArXxYTWQ,152 +tzdata/zoneinfo/Indian/Cocos,sha256=6J2DXIEdTaRKqLOGeCzogo3whaoO6PJWYamIHS8A6Qw,187 +tzdata/zoneinfo/Indian/Comoro,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Indian/Kerguelen,sha256=lEhfD1j4QnZ-wtuTU51fw6-yvc4WZz2eY8CYjMzWQ44,152 +tzdata/zoneinfo/Indian/Mahe,sha256=DZ6lBT6DGIAypvtNMB1dtoj0MBHltrH5F6EbcaDaexY,133 +tzdata/zoneinfo/Indian/Maldives,sha256=lEhfD1j4QnZ-wtuTU51fw6-yvc4WZz2eY8CYjMzWQ44,152 +tzdata/zoneinfo/Indian/Mauritius,sha256=R6pdJalrHVK5LlGOmEsyD66_-c5a9ptJM-xE71Fo8hQ,179 +tzdata/zoneinfo/Indian/Mayotte,sha256=B4OFT1LDOtprbSpdhnZi8K6OFSONL857mtpPTTGetGY,191 +tzdata/zoneinfo/Indian/Reunion,sha256=DZ6lBT6DGIAypvtNMB1dtoj0MBHltrH5F6EbcaDaexY,133 +tzdata/zoneinfo/Indian/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Indian/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/Iran,sha256=ozLlhNXzpJCZx7bc-VpcmNdgdtn6lPtF6f9qkaDEycI,812 +tzdata/zoneinfo/Israel,sha256=n83o1YTeoFhfXIcnqvNfSKFJ4NvTqDv2zvi8qcFAIeM,1074 +tzdata/zoneinfo/Jamaica,sha256=pDexcAMzrv9TqLWGjVOHwIDcFMLT6Vqlzjb5AbNmkoQ,339 +tzdata/zoneinfo/Japan,sha256=WaOHFDDw07k-YZ-jCkOkHR6IvdSf8m8J0PQFpQBwb5Y,213 +tzdata/zoneinfo/Kwajalein,sha256=S-ZFi6idKzDaelLy7DRjGPeD0s7oVud3xLMxZKNlBk8,219 +tzdata/zoneinfo/Libya,sha256=zzMBLZZh4VQ4_ARe5k4L_rsuqKP7edKvVt8F6kvj5FM,431 +tzdata/zoneinfo/MET,sha256=EgkGCb0euba8FQGgUqAYFx4mRuKeRD6W5GIAyV6yDJ0,621 +tzdata/zoneinfo/MST,sha256=84AZayGFK2nfpYS0-u16q9QWrYYkCwUJcNdOnG7Ai1s,111 +tzdata/zoneinfo/MST7MDT,sha256=yt9ENOc1sfICs1yxJjiii6FhCQkEsEuw67zvs-EeBb4,951 +tzdata/zoneinfo/Mexico/BajaNorte,sha256=x2_eWDUWxIi5gKTGmM_d5V1HFt1-JN-j8dIpqj5Dn7M,1025 +tzdata/zoneinfo/Mexico/BajaSur,sha256=C5CBj73KgB8vbDbDEgqMHfPeMeglQj156WNbwYSxux8,718 +tzdata/zoneinfo/Mexico/General,sha256=vhDy1hSceJyFa3bIqn2qRi1kgxtvrCCaaB7s65mljtY,773 +tzdata/zoneinfo/Mexico/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Mexico/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/NZ,sha256=Dgbn5VrtvJLvWz0Qbnw5KrFijP2KQosg6S6ZAooL-7k,1043 +tzdata/zoneinfo/NZ-CHAT,sha256=pnhY_Lb8V4eo6cK3yL6JZL086SI_etG6rCycppJfTHg,808 +tzdata/zoneinfo/Navajo,sha256=m7cDkg7KS2EZ6BoQVYOk9soiBlHxO0GEeat81WxBPz4,1042 +tzdata/zoneinfo/PRC,sha256=v4t-2C_m5j5tmPjOqTTurJAc0Wq6hetXVc4_i0KJ6oo,393 +tzdata/zoneinfo/PST8PDT,sha256=8w8p5P18af0k8f2C3amKrvi4tSK83QUhUCV6QmyeTa8,951 +tzdata/zoneinfo/Pacific/Apia,sha256=3HDEfICrLIehq3VLq4_r_DhQgFniSd_lXnOjdZgI6hQ,407 +tzdata/zoneinfo/Pacific/Auckland,sha256=Dgbn5VrtvJLvWz0Qbnw5KrFijP2KQosg6S6ZAooL-7k,1043 +tzdata/zoneinfo/Pacific/Bougainville,sha256=rqdn1Y4HSarx-vjPk00lsHNfhj3IQgKCViAsumuN_IY,201 +tzdata/zoneinfo/Pacific/Chatham,sha256=pnhY_Lb8V4eo6cK3yL6JZL086SI_etG6rCycppJfTHg,808 +tzdata/zoneinfo/Pacific/Chuuk,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154 +tzdata/zoneinfo/Pacific/Easter,sha256=EwVM74XjsboPVxK9bWmdd4nTrtvasP1zlLdxrMB_YaE,1174 
+tzdata/zoneinfo/Pacific/Efate,sha256=LiX_rTfipQh_Vnqb_m7OGxyBtyAUC9UANVKHUpLoCcU,342 +tzdata/zoneinfo/Pacific/Enderbury,sha256=ojOG-oqi25HOnY6BFhav_3bmWg1LDILT4v-kxOFVuqI,172 +tzdata/zoneinfo/Pacific/Fakaofo,sha256=Uf8zeML2X8doPg8CX-p0mMGP-IOj7aHAMe7ULD5khxA,153 +tzdata/zoneinfo/Pacific/Fiji,sha256=umCNhtTuBziTXne-WAxzvYvGKqZxTYOTwK-tJhYh4MQ,396 +tzdata/zoneinfo/Pacific/Funafuti,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134 +tzdata/zoneinfo/Pacific/Galapagos,sha256=Z1KJPZSvO8M_Pay9WLcNAxzjo8imPrQ7FnXNOXfZl8c,175 +tzdata/zoneinfo/Pacific/Gambier,sha256=yIh86hjpDk1wRWTVJROOGqn9tkc7e9_O6zNxqs-wBoM,132 +tzdata/zoneinfo/Pacific/Guadalcanal,sha256=Ui8PN0th4sb1-n0Z8ceszNCeSiE0Yu47QskNMr8r8Yw,134 +tzdata/zoneinfo/Pacific/Guam,sha256=i57eM6syriUFvAbrVALnziCw_I4lENyzBcJdOaH71yU,350 +tzdata/zoneinfo/Pacific/Honolulu,sha256=HapXKaoeDzLNRL4RLQGtTMVnqf522H3LuRgr6NLIj_A,221 +tzdata/zoneinfo/Pacific/Johnston,sha256=HapXKaoeDzLNRL4RLQGtTMVnqf522H3LuRgr6NLIj_A,221 +tzdata/zoneinfo/Pacific/Kanton,sha256=ojOG-oqi25HOnY6BFhav_3bmWg1LDILT4v-kxOFVuqI,172 +tzdata/zoneinfo/Pacific/Kiritimati,sha256=cUVGmMRBgllfuYJ3X0B0zg0Bf-LPo9l7Le5ju882dx4,174 +tzdata/zoneinfo/Pacific/Kosrae,sha256=pQMLJXilygPhlkm0jCo5JuVmpmYJgLIdiTVxeP59ZEg,242 +tzdata/zoneinfo/Pacific/Kwajalein,sha256=S-ZFi6idKzDaelLy7DRjGPeD0s7oVud3xLMxZKNlBk8,219 +tzdata/zoneinfo/Pacific/Majuro,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134 +tzdata/zoneinfo/Pacific/Marquesas,sha256=ilprkRvn-N1XjptSI_0ZwUjeuokP-5l64uKjRBp0kxw,139 +tzdata/zoneinfo/Pacific/Midway,sha256=ZQ2Rh1E2ZZBVMGPNaBWS_cqKCZV-DOLBjWaX7Dhe95Y,146 +tzdata/zoneinfo/Pacific/Nauru,sha256=wahZONjreNAmYwhQ2CWdKMAE3SVm4S2aYvMZqcAlSYc,183 +tzdata/zoneinfo/Pacific/Niue,sha256=8WWebtgCnrMBKjuLNEYEWlktNI2op2kkKgk0Vcz8GaM,154 +tzdata/zoneinfo/Pacific/Norfolk,sha256=vL8G6W5CScYqp76g0b15UPIYHw2Lt60qOktHUF7caDs,237 +tzdata/zoneinfo/Pacific/Noumea,sha256=ezUyn7AYWBblrZbStlItJYu7XINCLiihrCBZB-Bl-Qw,198 +tzdata/zoneinfo/Pacific/Pago_Pago,sha256=ZQ2Rh1E2ZZBVMGPNaBWS_cqKCZV-DOLBjWaX7Dhe95Y,146 +tzdata/zoneinfo/Pacific/Palau,sha256=VkLRsKUUVXo3zrhAXn9iM-pKySbGIVfzWoopDhmceMA,148 +tzdata/zoneinfo/Pacific/Pitcairn,sha256=AJh6olJxXQzCMWKOE5ye4jHfgg1VA-9-gCZ5MbrX_8E,153 +tzdata/zoneinfo/Pacific/Pohnpei,sha256=Ui8PN0th4sb1-n0Z8ceszNCeSiE0Yu47QskNMr8r8Yw,134 +tzdata/zoneinfo/Pacific/Ponape,sha256=Ui8PN0th4sb1-n0Z8ceszNCeSiE0Yu47QskNMr8r8Yw,134 +tzdata/zoneinfo/Pacific/Port_Moresby,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154 +tzdata/zoneinfo/Pacific/Rarotonga,sha256=J6a2mOrTp4bsZNovj3HjJK9AVJ89PhdEpQMMVD__i18,406 +tzdata/zoneinfo/Pacific/Saipan,sha256=i57eM6syriUFvAbrVALnziCw_I4lENyzBcJdOaH71yU,350 +tzdata/zoneinfo/Pacific/Samoa,sha256=ZQ2Rh1E2ZZBVMGPNaBWS_cqKCZV-DOLBjWaX7Dhe95Y,146 +tzdata/zoneinfo/Pacific/Tahiti,sha256=Ivcs04hthxEQj1I_6aACc70By0lmxlvhgGFYh843e14,133 +tzdata/zoneinfo/Pacific/Tarawa,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134 +tzdata/zoneinfo/Pacific/Tongatapu,sha256=mjGjNSUATfw0yLGB0zsLxz3_L1uWxPANML8K4HQQIMY,237 +tzdata/zoneinfo/Pacific/Truk,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154 +tzdata/zoneinfo/Pacific/Wake,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134 +tzdata/zoneinfo/Pacific/Wallis,sha256=CQNWIL2DFpej6Qcvgt40z8pekS1QyNpUdzmqLyj7bY4,134 +tzdata/zoneinfo/Pacific/Yap,sha256=aDABBVtu-dydiHNODt3ReC8cNkO3wTp16c-OkFIAbhk,154 +tzdata/zoneinfo/Pacific/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/Pacific/__pycache__/__init__.cpython-310.pyc,, 
+tzdata/zoneinfo/Poland,sha256=6I9aUfFoFXpBrC3YpO4OmoeUGchMYSK0dxsaKjPZOkw,923 +tzdata/zoneinfo/Portugal,sha256=Nr-w4MM_s8Zhwdu1D4cNOQiTZMwZibYswSH1nB1GUKg,1454 +tzdata/zoneinfo/ROC,sha256=oEwscvT3aoMXjQNt2X0VfuHzLkeORN2npcEJI2h-5s8,511 +tzdata/zoneinfo/ROK,sha256=ZKcLb7zJtl52Lb0l64m29AwTcUbtyNvU0IHq-s2reN4,415 +tzdata/zoneinfo/Singapore,sha256=CVSy2aMB2U9DSAJGBqcbvLL6JNPNNwn1vIvKYFA5eF0,256 +tzdata/zoneinfo/Turkey,sha256=KnFjsWuUgG9pmRNI59CmDEbrYbHwMF9fS4P2E9sQgG8,1200 +tzdata/zoneinfo/UCT,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111 +tzdata/zoneinfo/US/Alaska,sha256=d8oMIpYvBpmLzl5I2By4ZaFEZsg_9dxgfqpIM0QFi_Y,977 +tzdata/zoneinfo/US/Aleutian,sha256=q_sZgOINX4TsX9iBx1gNd6XGwBnzCjg6qpdAQhK0ieA,969 +tzdata/zoneinfo/US/Arizona,sha256=rhFFPCHQiYTedfLv7ATckxeKe04jxeUvIJi4vUXMtUc,240 +tzdata/zoneinfo/US/Central,sha256=wntzn_RqffBZThINcltDkhfhHkTqmlDNxJEwODtUguc,1754 +tzdata/zoneinfo/US/East-Indiana,sha256=5nj0KhPvvXvg8mqc5T4EscKKWC6rBWEcsBwWg2Qy8Hs,531 +tzdata/zoneinfo/US/Eastern,sha256=1_IgazpFmJ_JrWPVWJIlMvpzUigNX4cXa_HbecsdH6k,1744 +tzdata/zoneinfo/US/Hawaii,sha256=HapXKaoeDzLNRL4RLQGtTMVnqf522H3LuRgr6NLIj_A,221 +tzdata/zoneinfo/US/Indiana-Starke,sha256=KJCzXct8CTMItVLYLYeBqM6aT6b53gWCg6aDbsH58oI,1016 +tzdata/zoneinfo/US/Michigan,sha256=I4F8Mt9nx38AF6D-steYskBa_HHO6jKU1-W0yRFr50A,899 +tzdata/zoneinfo/US/Mountain,sha256=m7cDkg7KS2EZ6BoQVYOk9soiBlHxO0GEeat81WxBPz4,1042 +tzdata/zoneinfo/US/Pacific,sha256=IA0FdU9tg6Nxz0CNcIUSV5dlezsL6-uh5QjP_oaj5cg,1294 +tzdata/zoneinfo/US/Samoa,sha256=ZQ2Rh1E2ZZBVMGPNaBWS_cqKCZV-DOLBjWaX7Dhe95Y,146 +tzdata/zoneinfo/US/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/US/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/UTC,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111 +tzdata/zoneinfo/Universal,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111 +tzdata/zoneinfo/W-SU,sha256=7S4KCZ-0RrJBZoNDjT9W-fxaYqFsdUmn9Zy8k1s2TIo,908 +tzdata/zoneinfo/WET,sha256=pAiBtwIi4Sqi79_Ppm2V4VMiMrJKOUvMdCZTJeAizAc,494 +tzdata/zoneinfo/Zulu,sha256=_dzh5kihcyrCmv2aFhUbKXPN8ILn7AxpD35CvmtZi5M,111 +tzdata/zoneinfo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +tzdata/zoneinfo/__pycache__/__init__.cpython-310.pyc,, +tzdata/zoneinfo/iso3166.tab,sha256=oBpdFY8x1GrY5vjMKgbGQYEGgqk5fUYDIPaNVCG2XnE,4791 +tzdata/zoneinfo/leapseconds,sha256=fjC39Eu3wB6I4g7x_VL7HzvDVbiKbLUjfQAEgo7442I,3257 +tzdata/zoneinfo/tzdata.zi,sha256=q6xnElaYdX6HiShmu3FxIV9CeWaQHmnF9XLjV2-bYv0,109388 +tzdata/zoneinfo/zone.tab,sha256=qSLfeCWE3tsCDIIQbr71DMkmCUXTIUEgNZgfN-60d-Y,18846 +tzdata/zoneinfo/zone1970.tab,sha256=FJErvL9wggoFluO2WceYn8ZQ-nA9A073Lub1x2Pzg40,17582 +tzdata/zoneinfo/zonenow.tab,sha256=YoPd7huhHsKlJliOO-eMIBE5-bHBKpbfjkSJQFAto6I,8311 +tzdata/zones,sha256=W13GrYuma2VrkfW_VDfQpCt0Ivs2tvvYE4I63b0Z6jM,9084 diff --git a/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..4724c45738f6ac125bb3a21787855562e6870440 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/top_level.txt new file mode 100644 index 
0000000000000000000000000000000000000000..0883ff0705bf11267d4d921b9bf48392b3e23889 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tzdata-2024.1.dist-info/top_level.txt @@ -0,0 +1 @@ +tzdata
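For context, the ``tzdata`` wheel recorded above is a data-only package: it is normally consumed through the standard library's ``zoneinfo`` module, which checks the system time zone database first and falls back to these packaged binaries when none is available (the PEP 615 mechanism described in the METADATA). A minimal usage sketch, assuming Python 3.9+ with this wheel installed (the ``IANA_VERSION`` attribute is provided by the ``tzdata/__init__.py`` listed in the RECORD)::

    # Sketch: resolving an IANA key via zoneinfo, with the tzdata package as fallback.
    from datetime import datetime
    from zoneinfo import ZoneInfo

    import tzdata  # pure-data package; exposes the IANA release it was built from

    print(tzdata.IANA_VERSION)  # e.g. "2024a" for the 2024.1 wheel

    # ZoneInfo reads compiled zoneinfo files from the system TZPATH or, failing
    # that, from the tzdata package's zoneinfo/ tree recorded above.
    dt = datetime(2024, 7, 1, 12, 0, tzinfo=ZoneInfo("America/New_York"))
    print(dt.utcoffset())  # -1 day, 20:00:00 (UTC-4, i.e. EDT)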