diff --git a/llmeval-env/lib/python3.10/site-packages/_yaml/__init__.py b/llmeval-env/lib/python3.10/site-packages/_yaml/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7baa8c4b68127d5cdf0be9a799429e61347c2694 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/_yaml/__init__.py @@ -0,0 +1,33 @@ +# This is a stub package designed to roughly emulate the _yaml +# extension module, which previously existed as a standalone module +# and has been moved into the `yaml` package namespace. +# It does not perfectly mimic its old counterpart, but should get +# close enough for anyone who's relying on it even when they shouldn't. +import yaml + +# in some circumstances, the yaml module we imported may be from a different version, so we need +# to tread carefully when poking at it here (it may not have the attributes we expect) +if not getattr(yaml, '__with_libyaml__', False): + from sys import version_info + + exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError + raise exc("No module named '_yaml'") +else: + from yaml._yaml import * + import warnings + warnings.warn( + 'The _yaml extension module is now located at yaml._yaml' + ' and its location is subject to change. To use the' + ' LibYAML-based parser and emitter, import from `yaml`:' + ' `from yaml import CLoader as Loader, CDumper as Dumper`.', + DeprecationWarning + ) + del warnings + # Don't `del yaml` here because yaml is actually an existing + # namespace member of _yaml. + +__name__ = '_yaml' +# If the module is top-level (i.e. not a part of any specific package) +# then the attribute should be set to ''. +# https://docs.python.org/3.8/library/types.html +__package__ = '' diff --git a/llmeval-env/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..844c2dc8e8052a2e4e8b54a9c3207e77acc6fb81 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/_yaml/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout/__init__.py b/llmeval-env/lib/python3.10/site-packages/async_timeout/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ffb069fce9b2b9a03515404155a7e5cc439484a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/async_timeout/__init__.py @@ -0,0 +1,239 @@ +import asyncio +import enum +import sys +import warnings +from types import TracebackType +from typing import Optional, Type + + +if sys.version_info >= (3, 8): + from typing import final +else: + from typing_extensions import final + + +if sys.version_info >= (3, 11): + + def _uncancel_task(task: "asyncio.Task[object]") -> None: + task.uncancel() + +else: + + def _uncancel_task(task: "asyncio.Task[object]") -> None: + pass + + +__version__ = "4.0.3" + + +__all__ = ("timeout", "timeout_at", "Timeout") + + +def timeout(delay: Optional[float]) -> "Timeout": + """timeout context manager. + + Useful in cases when you want to apply timeout logic around block + of code or in cases when asyncio.wait_for is not suitable. For example: + + >>> async with timeout(0.001): + ... async with aiohttp.get('https://github.com') as r: + ... 
await r.text() + + + delay - value in seconds or None to disable timeout logic + """ + loop = asyncio.get_running_loop() + if delay is not None: + deadline = loop.time() + delay # type: Optional[float] + else: + deadline = None + return Timeout(deadline, loop) + + +def timeout_at(deadline: Optional[float]) -> "Timeout": + """Schedule the timeout at absolute time. + + deadline argument points on the time in the same clock system + as loop.time(). + + Please note: it is not POSIX time but a time with + undefined starting base, e.g. the time of the system power on. + + >>> async with timeout_at(loop.time() + 10): + ... async with aiohttp.get('https://github.com') as r: + ... await r.text() + + + """ + loop = asyncio.get_running_loop() + return Timeout(deadline, loop) + + +class _State(enum.Enum): + INIT = "INIT" + ENTER = "ENTER" + TIMEOUT = "TIMEOUT" + EXIT = "EXIT" + + +@final +class Timeout: + # Internal class, please don't instantiate it directly + # Use timeout() and timeout_at() public factories instead. + # + # Implementation note: `async with timeout()` is preferred + # over `with timeout()`. + # While technically the Timeout class implementation + # doesn't need to be async at all, + # the `async with` statement explicitly points that + # the context manager should be used from async function context. + # + # This design allows to avoid many silly misusages. + # + # TimeoutError is raised immediately when scheduled + # if the deadline is passed. + # The purpose is to time out as soon as possible + # without waiting for the next await expression. + + __slots__ = ("_deadline", "_loop", "_state", "_timeout_handler", "_task") + + def __init__( + self, deadline: Optional[float], loop: asyncio.AbstractEventLoop + ) -> None: + self._loop = loop + self._state = _State.INIT + + self._task: Optional["asyncio.Task[object]"] = None + self._timeout_handler = None # type: Optional[asyncio.Handle] + if deadline is None: + self._deadline = None # type: Optional[float] + else: + self.update(deadline) + + def __enter__(self) -> "Timeout": + warnings.warn( + "with timeout() is deprecated, use async with timeout() instead", + DeprecationWarning, + stacklevel=2, + ) + self._do_enter() + return self + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + self._do_exit(exc_type) + return None + + async def __aenter__(self) -> "Timeout": + self._do_enter() + return self + + async def __aexit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> Optional[bool]: + self._do_exit(exc_type) + return None + + @property + def expired(self) -> bool: + """Is timeout expired during execution?""" + return self._state == _State.TIMEOUT + + @property + def deadline(self) -> Optional[float]: + return self._deadline + + def reject(self) -> None: + """Reject scheduled timeout if any.""" + # cancel is maybe better name but + # task.cancel() raises CancelledError in asyncio world. + if self._state not in (_State.INIT, _State.ENTER): + raise RuntimeError(f"invalid state {self._state.value}") + self._reject() + + def _reject(self) -> None: + self._task = None + if self._timeout_handler is not None: + self._timeout_handler.cancel() + self._timeout_handler = None + + def shift(self, delay: float) -> None: + """Advance timeout on delay seconds. + + The delay can be negative. 
+ + Raise RuntimeError if shift is called when deadline is not scheduled + """ + deadline = self._deadline + if deadline is None: + raise RuntimeError("cannot shift timeout if deadline is not scheduled") + self.update(deadline + delay) + + def update(self, deadline: float) -> None: + """Set deadline to absolute value. + + deadline argument points on the time in the same clock system + as loop.time(). + + If new deadline is in the past the timeout is raised immediately. + + Please note: it is not POSIX time but a time with + undefined starting base, e.g. the time of the system power on. + """ + if self._state == _State.EXIT: + raise RuntimeError("cannot reschedule after exit from context manager") + if self._state == _State.TIMEOUT: + raise RuntimeError("cannot reschedule expired timeout") + if self._timeout_handler is not None: + self._timeout_handler.cancel() + self._deadline = deadline + if self._state != _State.INIT: + self._reschedule() + + def _reschedule(self) -> None: + assert self._state == _State.ENTER + deadline = self._deadline + if deadline is None: + return + + now = self._loop.time() + if self._timeout_handler is not None: + self._timeout_handler.cancel() + + self._task = asyncio.current_task() + if deadline <= now: + self._timeout_handler = self._loop.call_soon(self._on_timeout) + else: + self._timeout_handler = self._loop.call_at(deadline, self._on_timeout) + + def _do_enter(self) -> None: + if self._state != _State.INIT: + raise RuntimeError(f"invalid state {self._state.value}") + self._state = _State.ENTER + self._reschedule() + + def _do_exit(self, exc_type: Optional[Type[BaseException]]) -> None: + if exc_type is asyncio.CancelledError and self._state == _State.TIMEOUT: + assert self._task is not None + _uncancel_task(self._task) + self._timeout_handler = None + self._task = None + raise asyncio.TimeoutError + # timeout has not expired + self._state = _State.EXIT + self._reject() + return None + + def _on_timeout(self) -> None: + assert self._task is not None + self._task.cancel() + self._state = _State.TIMEOUT + # drop the reference early + self._timeout_handler = None diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/async_timeout/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b8fa38e699f65a8d54f38ba74c475bfbc92a04fa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/async_timeout/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/async_timeout/py.typed b/llmeval-env/lib/python3.10/site-packages/async_timeout/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..3b94f915737aba1f12a0f067fdba3726bfe02df5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/async_timeout/py.typed @@ -0,0 +1 @@ +Placeholder diff --git a/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/METADATA new file mode 100644 index 
0000000000000000000000000000000000000000..04033b670050419f5c80ab33020fa27ca53f14bd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/METADATA @@ -0,0 +1,56 @@ +Metadata-Version: 2.3 +Name: filelock +Version: 3.14.0 +Summary: A platform independent file lock. +Project-URL: Documentation, https://py-filelock.readthedocs.io +Project-URL: Homepage, https://github.com/tox-dev/py-filelock +Project-URL: Source, https://github.com/tox-dev/py-filelock +Project-URL: Tracker, https://github.com/tox-dev/py-filelock/issues +Maintainer-email: Bernát Gábor +License-Expression: Unlicense +License-File: LICENSE +Keywords: application,cache,directory,log,user +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: The Unlicense (Unlicense) +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Internet +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: System +Requires-Python: >=3.8 +Provides-Extra: docs +Requires-Dist: furo>=2023.9.10; extra == 'docs' +Requires-Dist: sphinx-autodoc-typehints!=1.23.4,>=1.25.2; extra == 'docs' +Requires-Dist: sphinx>=7.2.6; extra == 'docs' +Provides-Extra: testing +Requires-Dist: covdefaults>=2.3; extra == 'testing' +Requires-Dist: coverage>=7.3.2; extra == 'testing' +Requires-Dist: diff-cover>=8.0.1; extra == 'testing' +Requires-Dist: pytest-cov>=4.1; extra == 'testing' +Requires-Dist: pytest-mock>=3.12; extra == 'testing' +Requires-Dist: pytest-timeout>=2.2; extra == 'testing' +Requires-Dist: pytest>=7.4.3; extra == 'testing' +Provides-Extra: typing +Requires-Dist: typing-extensions>=4.8; (python_version < '3.11') and extra == 'typing' +Description-Content-Type: text/markdown + +# filelock + +[![PyPI](https://img.shields.io/pypi/v/filelock)](https://pypi.org/project/filelock/) +[![Supported Python +versions](https://img.shields.io/pypi/pyversions/filelock.svg)](https://pypi.org/project/filelock/) +[![Documentation +status](https://readthedocs.org/projects/py-filelock/badge/?version=latest)](https://py-filelock.readthedocs.io/en/latest/?badge=latest) +[![Code style: +black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +[![Downloads](https://static.pepy.tech/badge/filelock/month)](https://pepy.tech/project/filelock) +[![check](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml/badge.svg)](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml) + +For more information checkout the [official documentation](https://py-filelock.readthedocs.io/en/latest/index.html). 
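The filelock README above only links out to the documentation and ships no usage example. As a minimal sketch (illustrative only, not part of the packaged files, assuming the standard `FileLock`/`Timeout` API exported by this wheel):

```python
# Illustrative use of the filelock wheel added above; paths are hypothetical.
from filelock import FileLock, Timeout

lock = FileLock("high_ground.txt.lock")  # lock file guarding the resource
try:
    with lock.acquire(timeout=10):       # wait at most 10 seconds for the lock
        with open("high_ground.txt", "a") as f:
            f.write("appended while holding the lock\n")
except Timeout:
    print("Another process holds the lock; giving up.")
```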
diff --git a/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..b7ea9de659973b29d788fb77ac22cd34a5806046 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/RECORD @@ -0,0 +1,22 @@ +filelock-3.14.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +filelock-3.14.0.dist-info/METADATA,sha256=nSDS4Bqd1Rtvf-PB_bDjn0Dgu2kuL2GGCE1vM33KrlA,2792 +filelock-3.14.0.dist-info/RECORD,, +filelock-3.14.0.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87 +filelock-3.14.0.dist-info/licenses/LICENSE,sha256=iNm062BXnBkew5HKBMFhMFctfu3EqG2qWL8oxuFMm80,1210 +filelock/__init__.py,sha256=c_wPy9Fo0fmqE4V448Q2A2dDYnMTZeAVHX-kUoN3KCY,1214 +filelock/__pycache__/__init__.cpython-310.pyc,, +filelock/__pycache__/_api.cpython-310.pyc,, +filelock/__pycache__/_error.cpython-310.pyc,, +filelock/__pycache__/_soft.cpython-310.pyc,, +filelock/__pycache__/_unix.cpython-310.pyc,, +filelock/__pycache__/_util.cpython-310.pyc,, +filelock/__pycache__/_windows.cpython-310.pyc,, +filelock/__pycache__/version.cpython-310.pyc,, +filelock/_api.py,sha256=YrQpa5D3mB5VTEUZ8DHO1REvVu-WWmFRYgNaV3e6jOg,13028 +filelock/_error.py,sha256=-5jMcjTu60YAvAO1UbqDD1GIEjVkwr8xCFwDBtMeYDg,787 +filelock/_soft.py,sha256=haqtc_TB_KJbYv2a8iuEAclKuM4fMG1vTcp28sK919c,1711 +filelock/_unix.py,sha256=-FXP0tjInBHUYygOlMpp4taUmD87QOkrD_4ybg_iT7Q,2259 +filelock/_util.py,sha256=QHBoNFIYfbAThhotH3Q8E2acFc84wpG49-T-uu017ZE,1715 +filelock/_windows.py,sha256=eMKL8dZKrgekf5VYVGR14an29JGEInRtUO8ui9ABywg,2177 +filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +filelock/version.py,sha256=Ipjekae6alpGZC2b94mJAE2S2ZyJybTBe3oNCWsIFS4,413 diff --git a/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..516596c76787b10928cbab24f22c0ea00433b15d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.24.2 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/licenses/LICENSE b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..cf1ab25da0349f84a3fdd40032f0ce99db813b8b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/filelock-3.14.0.dist-info/licenses/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__init__.py b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4cb36263b095ebba9e9a1150f9c38f6ac6019be8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__init__.py @@ -0,0 +1,13 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +from .__version__ import __author__, __copyright__, __email__, __license__, __version__ +from ._func import detect_file_encoding +from ._mbstrdecoder import MultiByteStrDecoder + + +__all__ = ( + "detect_file_encoding", + "MultiByteStrDecoder", +) diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97a6671688a769ce492d4784e76ee538bce725eb Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__version__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__version__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b1564c992b6b44a1a27ac474092e4233f913977 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/__version__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_binary_ext_checker.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_binary_ext_checker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad2f29d2ac309b27d487dc650f1bec3a5f5fc55f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_binary_ext_checker.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_func.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_func.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1819245e71353bfa72bd2a8b3291ef144e6ab42 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_func.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_mbstrdecoder.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_mbstrdecoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e97c4de512813389e7fac692bc7e5e72c19c33a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__pycache__/_mbstrdecoder.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__version__.py b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__version__.py new file mode 100644 index 
0000000000000000000000000000000000000000..1047f5e5099e9ee9c18279e8520927f558c5b26b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/__version__.py @@ -0,0 +1,6 @@ +__author__ = "Tsuyoshi Hombashi" +__copyright__ = f"Copyright 2016, {__author__}" +__license__ = "MIT License" +__version__ = "1.1.3" +__maintainer__ = __author__ +__email__ = "tsuyoshi.hombashi@gmail.com" diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/_binary_ext_checker.py b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/_binary_ext_checker.py new file mode 100644 index 0000000000000000000000000000000000000000..217e29c9afdd9fb4949855381f76c0516902b784 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/_binary_ext_checker.py @@ -0,0 +1,264 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import os.path + + +# list from https://github.com/sindresorhus/binary-extensions +binary_exts = ( + "3dm", + "3ds", + "3g2", + "3gp", + "7z", + "a", + "aac", + "adp", + "ai", + "aif", + "aiff", + "alz", + "ape", + "apk", + "ar", + "arj", + "asf", + "au", + "avi", + "bak", + "baml", + "bh", + "bin", + "bk", + "bmp", + "btif", + "bz2", + "bzip2", + "cab", + "caf", + "cgm", + "class", + "cmx", + "cpio", + "cr2", + "cur", + "dat", + "dcm", + "deb", + "dex", + "djvu", + "dll", + "dmg", + "dng", + "doc", + "docm", + "docx", + "dot", + "dotm", + "dra", + "DS_Store", + "dsk", + "dts", + "dtshd", + "dvb", + "dwg", + "dxf", + "ecelp4800", + "ecelp7470", + "ecelp9600", + "egg", + "eol", + "eot", + "epub", + "exe", + "f4v", + "fbs", + "fh", + "fla", + "flac", + "fli", + "flv", + "fpx", + "fst", + "fvt", + "g3", + "gh", + "gif", + "graffle", + "gz", + "gzip", + "h261", + "h263", + "h264", + "icns", + "ico", + "ief", + "img", + "ipa", + "iso", + "jar", + "jpeg", + "jpg", + "jpgv", + "jpm", + "jxr", + "key", + "ktx", + "lha", + "lib", + "lvp", + "lz", + "lzh", + "lzma", + "lzo", + "m3u", + "m4a", + "m4v", + "mar", + "mdi", + "mht", + "mid", + "midi", + "mj2", + "mka", + "mkv", + "mmr", + "mng", + "mobi", + "mov", + "movie", + "mp3", + "mp4", + "mp4a", + "mpeg", + "mpg", + "mpga", + "mxu", + "nef", + "npx", + "numbers", + "nupkg", + "o", + "oga", + "ogg", + "ogv", + "otf", + "pages", + "pbm", + "pcx", + "pdb", + "pdf", + "pea", + "pgm", + "pic", + "png", + "pnm", + "pot", + "potm", + "potx", + "ppa", + "ppam", + "ppm", + "pps", + "ppsm", + "ppsx", + "ppt", + "pptm", + "pptx", + "psd", + "pya", + "pyc", + "pyo", + "pyv", + "qt", + "rar", + "ras", + "raw", + "resources", + "rgb", + "rip", + "rlc", + "rmf", + "rmvb", + "rtf", + "rz", + "s3m", + "s7z", + "scpt", + "sgi", + "shar", + "sil", + "sketch", + "slk", + "smv", + "snk", + "so", + "stl", + "suo", + "sub", + "swf", + "tar", + "tbz", + "tbz2", + "tga", + "tgz", + "thmx", + "tif", + "tiff", + "tlz", + "ttc", + "ttf", + "txz", + "udf", + "uvh", + "uvi", + "uvm", + "uvp", + "uvs", + "uvu", + "viv", + "vob", + "war", + "wav", + "wax", + "wbmp", + "wdp", + "weba", + "webm", + "webp", + "whl", + "wim", + "wm", + "wma", + "wmv", + "wmx", + "woff", + "woff2", + "wrm", + "wvx", + "xbm", + "xif", + "xla", + "xlam", + "xls", + "xlsb", + "xlsm", + "xlsx", + "xlt", + "xltm", + "xltx", + "xm", + "xmind", + "xpi", + "xpm", + "xwd", + "xz", + "z", + "zip", + "zipx", +) + + +def is_binary_ext_path(filepath) -> bool: + return os.path.splitext(filepath)[1].lstrip(".") in binary_exts diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/_func.py b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/_func.py new file mode 100644 index 
0000000000000000000000000000000000000000..c0e31dbecbd167e1426a7b73aaa91a0c63722878 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/_func.py @@ -0,0 +1,56 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import os +import stat +from errno import EBADF, ENOENT, ENOTDIR +from typing import Optional, Union + +from ._binary_ext_checker import is_binary_ext_path + + +def is_fifo(file_path: Union[int, bytes, str]) -> bool: + try: + return stat.S_ISFIFO(os.stat(file_path).st_mode) + except OSError as e: + if e.errno not in (ENOENT, ENOTDIR, EBADF): + raise + + return False + except ValueError: + return False + + +def to_codec_name(name: Optional[str]) -> Optional[str]: + if not name: + return None + + return name.lower().replace("-", "_") + + +def detect_file_encoding(file_path) -> Optional[str]: + from chardet.universaldetector import UniversalDetector + + if not os.path.isfile(file_path) or is_binary_ext_path(file_path) or is_fifo(file_path): + return None + + detector = UniversalDetector() + READ_SIZE = 4 * 1024 + + try: + with open(file_path, mode="rb") as f: + while True: + binary = f.read(READ_SIZE) + if not binary: + break + + detector.feed(binary) + if detector.done: + break + except OSError: + return None + finally: + detector.close() + + return to_codec_name(detector.result.get("encoding")) diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/_mbstrdecoder.py b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/_mbstrdecoder.py new file mode 100644 index 0000000000000000000000000000000000000000..1ed86446290bfde7b84da935b8b9b4beb9c38f99 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/_mbstrdecoder.py @@ -0,0 +1,291 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import copy +import re +from typing import List, Optional, Sequence + +from ._func import to_codec_name + + +def b(s: str) -> bytes: + return s.encode("latin-1") + + +class MultiByteStrDecoder: + """ + Reference: + https://docs.python.org/3/library/codecs.html + """ + + __CODECS = [ + "utf_7", + "utf_8", + "utf_8_sig", + "utf_16", + "utf_16_be", + "utf_16_le", + "utf_32", + "utf_32_be", + "utf_32_le", + "big5", + "big5hkscs", + "cp037", + "cp424", + "cp437", + "cp500", + "cp720", + "cp737", + "cp775", + "cp850", + "cp852", + "cp855", + "cp856", + "cp857", + "cp858", + "cp860", + "cp861", + "cp862", + "cp863", + "cp864", + "cp865", + "cp866", + "cp869", + "cp874", + "cp875", + "cp932", + "cp949", + "cp950", + "cp1006", + "cp1026", + "cp1140", + "cp1250", + "cp1251", + "cp1252", + "cp1253", + "cp1254", + "cp1255", + "cp1256", + "cp1257", + "cp1258", + "euc_jp", + "euc_jis_2004", + "euc_jisx0213", + "euc_kr", + "gb2312", + "gbk", + "gb18030", + "hz", + "iso2022_jp", + "iso2022_jp_1", + "iso2022_jp_2", + "iso2022_jp_2004", + "iso2022_jp_3", + "iso2022_jp_ext", + "iso2022_kr", + "latin_1", + "iso8859_2", + "iso8859_3", + "iso8859_4", + "iso8859_5", + "iso8859_6", + "iso8859_7", + "iso8859_8", + "iso8859_9", + "iso8859_10", + "iso8859_11", + "iso8859_13", + "iso8859_14", + "iso8859_15", + "iso8859_16", + "johab", + "koi8_r", + "koi8_u", + "mac_cyrillic", + "mac_greek", + "mac_iceland", + "mac_latin2", + "mac_roman", + "mac_turkish", + "ptcp154", + "shift_jis", + "shift_jis_2004", + "shift_jisx0213", + "base64_codec", + "bz2_codec", + "hex_codec", + "idna", + "mbcs", + "palmos", + "punycode", + "quopri_codec", + "raw_unicode_escape", + "rot_13", + "string_escape", + "unicode_escape", + "unicode_internal", + "uu_codec", + "zlib_codec", + ] + __RE_UTF7 = 
re.compile(b("[+].*?[-]")) + + @property + def unicode_str(self) -> str: + return self.__unicode_str + + @property + def codec(self) -> Optional[str]: + return self.__codec + + def __init__(self, value, codec_candidates: Optional[Sequence[str]] = None) -> None: + self.__encoded_str = value + self.__codec: Optional[str] = None + if codec_candidates is None: + self.__codec_candidate_list: List[str] = [] + else: + self.__codec_candidate_list = list(codec_candidates) + + self.__validate_str() + + self.__unicode_str = self.__to_unicode() + + def __repr__(self) -> str: + return f"codec={self.codec:s}, unicode={self.unicode_str:s}" + + def __validate_str(self) -> None: + if isinstance(self.__encoded_str, (str, bytes)): + return + + raise ValueError(f"value must be a string: actual={type(self.__encoded_str)}") + + def __is_buffer(self) -> bool: + return isinstance(self.__encoded_str, memoryview) + + def __is_multibyte_utf7(self, encoded_str) -> bool: + if self.__codec != "utf_7": + return False + + utf7_symbol_count = encoded_str.count(b("+")) + if utf7_symbol_count <= 0: + return False + + if utf7_symbol_count != encoded_str.count(b("-")): + return False + + return utf7_symbol_count == len(self.__RE_UTF7.findall(encoded_str)) + + def __get_encoded_str(self) -> str: + if self.__is_buffer(): + return str(self.__encoded_str) + + return self.__encoded_str + + @staticmethod + def __detect_encoding_helper(encoded_str) -> Optional[str]: + import chardet + + try: + detect = chardet.detect(encoded_str) + except TypeError: + detect = {} # type: ignore + + detect_encoding = detect.get("encoding") + confidence = detect.get("confidence") + + if detect_encoding not in ["ascii", "utf-8"] and confidence and confidence > 0.7: + # utf7 tend to be misrecognized as ascii + return detect_encoding + + return None + + def __get_codec_candidate_list(self, encoded_str) -> List[str]: + codec_candidate_list = copy.deepcopy(self.__CODECS) + detect_encoding = self.__detect_encoding_helper(encoded_str) + + if detect_encoding: + try: + codec_candidate_list.remove(detect_encoding) + except ValueError: + pass + + codec_candidate_list.insert(0, detect_encoding) + + for codec_candidate in self.__codec_candidate_list: + try: + codec_candidate_list.remove(codec_candidate) + except ValueError: + pass + + return self.__codec_candidate_list + codec_candidate_list + + def __to_unicode(self): + encoded_str = self.__get_encoded_str() + + if encoded_str == b"": + self.__codec = "unicode" + return "" + + for codec in self.__get_codec_candidate_list(encoded_str): + if not codec: + continue + + try: + self.__codec = to_codec_name(codec) + decoded_str = encoded_str.decode(codec) + break + except UnicodeDecodeError: + self.__codec = None + continue + except AttributeError: + if isinstance(encoded_str, str): + # already a unicode string (python 3) + self.__codec = "unicode" + + if not encoded_str: + return encoded_str + + return encoded_str + + self.__codec = None + + try: + return f"{encoded_str}" + except UnicodeDecodeError: + # some of the objects that cannot convertible to a string + # may reach this line + raise TypeError("argument must be a string") + else: + self.__codec = None + + try: + message = f"unknown codec: encoded_str={encoded_str}" + except UnicodeDecodeError: + message = f"unknown codec: value-type={type(encoded_str)}" + + raise UnicodeDecodeError(message) + + if self.codec == "utf_7": + return self.__process_utf7(encoded_str, decoded_str) + + return decoded_str + + def __process_utf7(self, encoded_str, decoded_str) -> str: + 
if not encoded_str: + self.__codec = "unicode" + + return encoded_str + + if self.__is_multibyte_utf7(encoded_str): + try: + decoded_str.encode("ascii") + + self.__codec = "ascii" + + return encoded_str.decode("ascii") + except UnicodeEncodeError: + return decoded_str + + self.__codec = "ascii" + + return encoded_str.decode("ascii") diff --git a/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/py.typed b/llmeval-env/lib/python3.10/site-packages/mbstrdecoder/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..c36b3c91708c206398b57e017ff425b087739ed7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/METADATA @@ -0,0 +1,35 @@ +Metadata-Version: 2.1 +Name: nvidia-nvjitlink-cu12 +Version: 12.4.127 +Summary: Nvidia JIT LTO Library +Home-page: https://developer.nvidia.com/cuda-zone +Author: Nvidia CUDA Installer Team +Author-email: cuda_installer@nvidia.com +License: NVIDIA Proprietary Software +Keywords: cuda,nvidia,runtime,machine learning,deep learning +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: Other/Proprietary License +Classifier: Natural Language :: English +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Topic :: Scientific/Engineering +Classifier: Topic :: Scientific/Engineering :: Mathematics +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Topic :: Software Development +Classifier: Topic :: Software Development :: Libraries +Classifier: Operating System :: Microsoft :: Windows +Classifier: Operating System :: POSIX :: Linux +Requires-Python: >=3 +License-File: License.txt + +NVIDIA compiler library for JIT LTO functionality. 
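Stepping back to the mbstrdecoder package added earlier in this diff: its public surface is just `detect_file_encoding()` and `MultiByteStrDecoder` (see `_func.py` and `_mbstrdecoder.py` above). A minimal usage sketch derived only from that code, not part of any packaged file:

```python
# Illustrative use of the mbstrdecoder sources shown above; the file path is hypothetical.
from mbstrdecoder import MultiByteStrDecoder, detect_file_encoding

decoder = MultiByteStrDecoder("吾輩は猫である".encode("utf_8"))
print(decoder.codec)        # best-guess codec name, e.g. "utf_8"
print(decoder.unicode_str)  # decoded str value

# Returns a normalized codec name, or None for missing/binary/FIFO paths.
print(detect_file_encoding("some_text_file.txt"))
```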
diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..946cf1d42db71650aff9b6edc6e1e5107e34cdaa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/RECORD @@ -0,0 +1,16 @@ +nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/__pycache__/__init__.cpython-310.pyc,, +nvidia/nvjitlink/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/nvjitlink/__pycache__/__init__.cpython-310.pyc,, +nvidia/nvjitlink/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/nvjitlink/include/__pycache__/__init__.cpython-310.pyc,, +nvidia/nvjitlink/include/nvJitLink.h,sha256=6P2KtTBXDDOsbxIi0Nr9Ot8_fh9XvsSAaDhaElJwLQU,16496 +nvidia/nvjitlink/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +nvidia/nvjitlink/lib/__pycache__/__init__.cpython-310.pyc,, +nvidia/nvjitlink/lib/libnvJitLink.so.12,sha256=y9FIi774K41kwt1Rqbau8_awvDp6gKmCG-SNHAf3AOc,53594512 +nvidia_nvjitlink_cu12-12.4.127.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +nvidia_nvjitlink_cu12-12.4.127.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262 +nvidia_nvjitlink_cu12-12.4.127.dist-info/METADATA,sha256=Tkm9zx69iT4wHTNBYxiPi30271awlBpcUiAg1itXbn0,1518 +nvidia_nvjitlink_cu12-12.4.127.dist-info/RECORD,, +nvidia_nvjitlink_cu12-12.4.127.dist-info/WHEEL,sha256=XDTs3wIbcE-BcRO08VJlZpA6z9OaC1mOKPCGGGwuM2g,109 +nvidia_nvjitlink_cu12-12.4.127.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7 diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e6c30e957cfb045017a9fef3430bb8ee87c4a074 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.42.0) +Root-Is-Purelib: true +Tag: py3-none-manylinux2014_x86_64 + diff --git a/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/nvidia_nvjitlink_cu12-12.4.127.dist-info/top_level.txt @@ -0,0 +1 @@ +nvidia diff --git a/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..67db8588217f266eb561f75fae738656325deac9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/LICENSE @@ -0,0 +1,175 @@ + 
+ Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..05779fa29c14fc279c2f4a8e13993f9add4e18a9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/METADATA @@ -0,0 +1,122 @@ +Metadata-Version: 2.1 +Name: requests +Version: 2.31.0 +Summary: Python HTTP for Humans. 
+Home-page: https://requests.readthedocs.io +Author: Kenneth Reitz +Author-email: me@kennethreitz.org +License: Apache 2.0 +Project-URL: Documentation, https://requests.readthedocs.io +Project-URL: Source, https://github.com/psf/requests +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Natural Language :: English +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: Software Development :: Libraries +Requires-Python: >=3.7 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: charset-normalizer (<4,>=2) +Requires-Dist: idna (<4,>=2.5) +Requires-Dist: urllib3 (<3,>=1.21.1) +Requires-Dist: certifi (>=2017.4.17) +Provides-Extra: security +Provides-Extra: socks +Requires-Dist: PySocks (!=1.5.7,>=1.5.6) ; extra == 'socks' +Provides-Extra: use_chardet_on_py3 +Requires-Dist: chardet (<6,>=3.0.2) ; extra == 'use_chardet_on_py3' + +# Requests + +**Requests** is a simple, yet elegant, HTTP library. + +```python +>>> import requests +>>> r = requests.get('https://httpbin.org/basic-auth/user/pass', auth=('user', 'pass')) +>>> r.status_code +200 +>>> r.headers['content-type'] +'application/json; charset=utf8' +>>> r.encoding +'utf-8' +>>> r.text +'{"authenticated": true, ...' +>>> r.json() +{'authenticated': True, ...} +``` + +Requests allows you to send HTTP/1.1 requests extremely easily. There’s no need to manually add query strings to your URLs, or to form-encode your `PUT` & `POST` data — but nowadays, just use the `json` method! + +Requests is one of the most downloaded Python packages today, pulling in around `30M downloads / week`— according to GitHub, Requests is currently [depended upon](https://github.com/psf/requests/network/dependents?package_id=UGFja2FnZS01NzA4OTExNg%3D%3D) by `1,000,000+` repositories. You may certainly put your trust in this code. + +[![Downloads](https://pepy.tech/badge/requests/month)](https://pepy.tech/project/requests) +[![Supported Versions](https://img.shields.io/pypi/pyversions/requests.svg)](https://pypi.org/project/requests) +[![Contributors](https://img.shields.io/github/contributors/psf/requests.svg)](https://github.com/psf/requests/graphs/contributors) + +## Installing Requests and Supported Versions + +Requests is available on PyPI: + +```console +$ python -m pip install requests +``` + +Requests officially supports Python 3.7+. + +## Supported Features & Best–Practices + +Requests is ready for the demands of building robust and reliable HTTP–speaking applications, for the needs of today. 
+ +- Keep-Alive & Connection Pooling +- International Domains and URLs +- Sessions with Cookie Persistence +- Browser-style TLS/SSL Verification +- Basic & Digest Authentication +- Familiar `dict`–like Cookies +- Automatic Content Decompression and Decoding +- Multi-part File Uploads +- SOCKS Proxy Support +- Connection Timeouts +- Streaming Downloads +- Automatic honoring of `.netrc` +- Chunked HTTP Requests + +## API Reference and User Guide available on [Read the Docs](https://requests.readthedocs.io) + +[![Read the Docs](https://raw.githubusercontent.com/psf/requests/main/ext/ss.png)](https://requests.readthedocs.io) + +## Cloning the repository + +When cloning the Requests repository, you may need to add the `-c +fetch.fsck.badTimezone=ignore` flag to avoid an error about a bad commit (see +[this issue](https://github.com/psf/requests/issues/2690) for more background): + +```shell +git clone -c fetch.fsck.badTimezone=ignore https://github.com/psf/requests.git +``` + +You can also apply this setting to your global Git config: + +```shell +git config --global fetch.fsck.badTimezone ignore +``` + +--- + +[![Kenneth Reitz](https://raw.githubusercontent.com/psf/requests/main/ext/kr.png)](https://kennethreitz.org) [![Python Software Foundation](https://raw.githubusercontent.com/psf/requests/main/ext/psf.png)](https://www.python.org/psf) + + diff --git a/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..79e7348e87b742469e67bac4c6b1c9910cfda1f5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/RECORD @@ -0,0 +1,42 @@ +requests-2.31.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +requests-2.31.0.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142 +requests-2.31.0.dist-info/METADATA,sha256=eCPokOnbb0FROLrfl0R5EpDvdufsb9CaN4noJH__54I,4634 +requests-2.31.0.dist-info/RECORD,, +requests-2.31.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92 +requests-2.31.0.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9 +requests/__init__.py,sha256=LvmKhjIz8mHaKXthC2Mv5ykZ1d92voyf3oJpd-VuAig,4963 +requests/__pycache__/__init__.cpython-310.pyc,, +requests/__pycache__/__version__.cpython-310.pyc,, +requests/__pycache__/_internal_utils.cpython-310.pyc,, +requests/__pycache__/adapters.cpython-310.pyc,, +requests/__pycache__/api.cpython-310.pyc,, +requests/__pycache__/auth.cpython-310.pyc,, +requests/__pycache__/certs.cpython-310.pyc,, +requests/__pycache__/compat.cpython-310.pyc,, +requests/__pycache__/cookies.cpython-310.pyc,, +requests/__pycache__/exceptions.cpython-310.pyc,, +requests/__pycache__/help.cpython-310.pyc,, +requests/__pycache__/hooks.cpython-310.pyc,, +requests/__pycache__/models.cpython-310.pyc,, +requests/__pycache__/packages.cpython-310.pyc,, +requests/__pycache__/sessions.cpython-310.pyc,, +requests/__pycache__/status_codes.cpython-310.pyc,, +requests/__pycache__/structures.cpython-310.pyc,, +requests/__pycache__/utils.cpython-310.pyc,, +requests/__version__.py,sha256=ssI3Ezt7PaxgkOW45GhtwPUclo_SO_ygtIm4A74IOfw,435 +requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495 +requests/adapters.py,sha256=v_FmjU5KZ76k-YttShZYB5RprIzhhL8Y3zgW9p4eBQ8,19553 +requests/api.py,sha256=q61xcXq4tmiImrvcSVLTbFyCiD2F-L_-hWKGbz4y8vg,6449 
+requests/auth.py,sha256=h-HLlVx9j8rKV5hfSAycP2ApOSglTz77R0tz7qCbbEE,10187 +requests/certs.py,sha256=Z9Sb410Anv6jUFTyss0jFFhU6xst8ctELqfy8Ev23gw,429 +requests/compat.py,sha256=yxntVOSEHGMrn7FNr_32EEam1ZNAdPRdSE13_yaHzTk,1451 +requests/cookies.py,sha256=kD3kNEcCj-mxbtf5fJsSaT86eGoEYpD3X0CSgpzl7BM,18560 +requests/exceptions.py,sha256=DhveFBclVjTRxhRduVpO-GbMYMID2gmjdLfNEqNpI_U,3811 +requests/help.py,sha256=gPX5d_H7Xd88aDABejhqGgl9B1VFRTt5BmiYvL3PzIQ,3875 +requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733 +requests/models.py,sha256=-DlKi0or8gFAM6VzutobXvvBW_2wrJuOF5NfndTIddA,35223 +requests/packages.py,sha256=DXgv-FJIczZITmv0vEBAhWj4W-5CGCIN_ksvgR17Dvs,957 +requests/sessions.py,sha256=-LvTzrPtetSTrR3buxu4XhdgMrJFLB1q5D7P--L2Xhw,30373 +requests/status_codes.py,sha256=FvHmT5uH-_uimtRz5hH9VCbt7VV-Nei2J9upbej6j8g,4235 +requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912 +requests/utils.py,sha256=6sx2X3cIVA8BgWOg8odxFy-_lbWDFETU8HI4fU4Rmqw,33448 diff --git a/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..1f37c02f2eb2e26b306202feaccb31e522b8b169 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.40.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..f2293605cf1b01dca72aad0a15c45b72ed5429a2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/requests-2.31.0.dist-info/top_level.txt @@ -0,0 +1 @@ +requests diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..241d3608c6879bda93c8d7a26f3bbf8807e7fcbe --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/METADATA @@ -0,0 +1,127 @@ +Metadata-Version: 2.3 +Name: safetensors +Version: 0.4.3 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Science/Research +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Classifier: Typing :: Typed +Requires-Dist: numpy >=1.21.6 ; extra == 'numpy' +Requires-Dist: safetensors[numpy] ; extra == 'torch' +Requires-Dist: torch >=1.10 ; extra == 
'torch' +Requires-Dist: safetensors[numpy] ; extra == 'tensorflow' +Requires-Dist: tensorflow >=2.11.0 ; extra == 'tensorflow' +Requires-Dist: safetensors[numpy] ; extra == 'pinned-tf' +Requires-Dist: tensorflow ==2.11.0 ; extra == 'pinned-tf' +Requires-Dist: safetensors[numpy] ; extra == 'jax' +Requires-Dist: flax >=0.6.3 ; extra == 'jax' +Requires-Dist: jax >=0.3.25 ; extra == 'jax' +Requires-Dist: jaxlib >=0.3.25 ; extra == 'jax' +Requires-Dist: mlx >=0.0.9 ; extra == 'mlx' +Requires-Dist: safetensors[numpy] ; extra == 'paddlepaddle' +Requires-Dist: paddlepaddle >=2.4.1 ; extra == 'paddlepaddle' +Requires-Dist: black ==22.3 ; extra == 'quality' +Requires-Dist: click ==8.0.4 ; extra == 'quality' +Requires-Dist: isort >=5.5.4 ; extra == 'quality' +Requires-Dist: flake8 >=3.8.3 ; extra == 'quality' +Requires-Dist: safetensors[numpy] ; extra == 'testing' +Requires-Dist: h5py >=3.7.0 ; extra == 'testing' +Requires-Dist: huggingface-hub >=0.12.1 ; extra == 'testing' +Requires-Dist: setuptools-rust >=1.5.2 ; extra == 'testing' +Requires-Dist: pytest >=7.2.0 ; extra == 'testing' +Requires-Dist: pytest-benchmark >=4.0.0 ; extra == 'testing' +Requires-Dist: hypothesis >=6.70.2 ; extra == 'testing' +Requires-Dist: safetensors[torch] ; extra == 'all' +Requires-Dist: safetensors[numpy] ; extra == 'all' +Requires-Dist: safetensors[pinned-tf] ; extra == 'all' +Requires-Dist: safetensors[jax] ; extra == 'all' +Requires-Dist: safetensors[paddlepaddle] ; extra == 'all' +Requires-Dist: safetensors[quality] ; extra == 'all' +Requires-Dist: safetensors[testing] ; extra == 'all' +Requires-Dist: safetensors[all] ; extra == 'dev' +Provides-Extra: numpy +Provides-Extra: torch +Provides-Extra: tensorflow +Provides-Extra: pinned-tf +Provides-Extra: jax +Provides-Extra: mlx +Provides-Extra: paddlepaddle +Provides-Extra: quality +Provides-Extra: testing +Provides-Extra: all +Provides-Extra: dev +Author-email: Nicolas Patry +Requires-Python: >=3.7 +Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM +Project-URL: Homepage, https://github.com/huggingface/safetensors +Project-URL: Source, https://github.com/huggingface/safetensors + +## Installation + +``` +pip install safetensors +``` + + +## Usage + +### Numpy + +```python +from safetensors.numpy import save_file, load_file +import numpy as np + +tensors = { + "a": np.zeros((2, 2)), + "b": np.zeros((2, 3), dtype=np.uint8) +} + +save_file(tensors, "./model.safetensors") + + +# Now loading +loaded = load_file("./model.safetensors") +``` + +### Torch + +```python +from safetensors.torch import save_file, load_file +import torch + +tensors = { + "a": torch.zeros((2, 2)), + "b": torch.zeros((2, 3), dtype=torch.uint8) +} + +save_file(tensors, "./model.safetensors") + + +# Now loading +loaded = load_file("./model.safetensors") +``` + +### Developing + +``` +# inside ./safetensors/bindings/python +pip install .[dev] +``` +Should be enough to install this library locally. 
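+### Lazy loading (sketch)
+
+Beyond `load_file`, tensors can also be read lazily without materializing the
+whole file. The snippet below is a minimal sketch using the `safe_open` API:
+
+```python
+from safetensors import safe_open
+
+# Open the file lazily; framework="pt" yields torch tensors,
+# device selects where each tensor is placed when read.
+tensors = {}
+with safe_open("./model.safetensors", framework="pt", device="cpu") as f:
+    for key in f.keys():
+        tensors[key] = f.get_tensor(key)
+```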
+ +### Testing + +``` +# inside ./safetensors/bindings/python +pip install .[dev] +pytest -sv tests/ +``` + diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..19da2aa348a3a5c42585b47067ba0125f30c99dd --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/RECORD @@ -0,0 +1,21 @@ +safetensors-0.4.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +safetensors-0.4.3.dist-info/METADATA,sha256=xyYOydMNNt9ejapL8t76ASs9iTE5oS7IrZOQl8e4wK0,3842 +safetensors-0.4.3.dist-info/RECORD,, +safetensors-0.4.3.dist-info/WHEEL,sha256=JL8sd1C0RQ2f7cmwbAn1Jp257v_vSS2r0VvTBpJeZwA,129 +safetensors/__init__.py,sha256=rFhZV2HBVDIijU2xKjg-0viTLETa-yMLMgFC9-47hdc,171 +safetensors/__init__.pyi,sha256=z6kNUzegHpyQAtFLtHu7ixUYWK9-Kwb6GqvV__6qMew,1970 +safetensors/__pycache__/__init__.cpython-310.pyc,, +safetensors/__pycache__/flax.cpython-310.pyc,, +safetensors/__pycache__/mlx.cpython-310.pyc,, +safetensors/__pycache__/numpy.cpython-310.pyc,, +safetensors/__pycache__/paddle.cpython-310.pyc,, +safetensors/__pycache__/tensorflow.cpython-310.pyc,, +safetensors/__pycache__/torch.cpython-310.pyc,, +safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so,sha256=tjRccWtdZ62y0_JHfAfAsaIUpwqny3EQHZkyeroL-qA,4438576 +safetensors/flax.py,sha256=AuyY2YHxTBy4xeQLxhAyMledHykpz0Qsys13eKiHNYg,3846 +safetensors/mlx.py,sha256=7OTFtbdmGIqLnrjo_36RJRJ7zq1aI6HasvRPLB3d_Gw,3837 +safetensors/numpy.py,sha256=5Z_wSFRxVbsAXpnej2zuqtdYK9K-nqZs1EE_G14C6Ck,4937 +safetensors/paddle.py,sha256=V_RCTXymi2PJAG_jA4-qnfzbYAUB_74VYYXFzhXTLYM,4175 +safetensors/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +safetensors/tensorflow.py,sha256=6XaUBcxm0xL4ulNcvVsllKZ_qJ_rBSlE50rNnECCNYQ,3890 +safetensors/torch.py,sha256=_Tr2aFS0o5vmauTWKfJFo43N-UR_Ap8crx3O8XpryJI,17582 diff --git a/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..efc3fe1c2443c99179e12648847b89e865477d5f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/safetensors-0.4.3.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: maturin (1.5.1) +Root-Is-Purelib: false +Tag: cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64 diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/entry_points.txt b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..9466bf6320157d79e69d6940e2b09fb08f64da51 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/setuptools-59.6.0.dist-info/entry_points.txt @@ -0,0 +1,56 @@ +[distutils.commands] +alias = setuptools.command.alias:alias +bdist_egg = setuptools.command.bdist_egg:bdist_egg +bdist_rpm = 
setuptools.command.bdist_rpm:bdist_rpm +build_clib = setuptools.command.build_clib:build_clib +build_ext = setuptools.command.build_ext:build_ext +build_py = setuptools.command.build_py:build_py +develop = setuptools.command.develop:develop +dist_info = setuptools.command.dist_info:dist_info +easy_install = setuptools.command.easy_install:easy_install +egg_info = setuptools.command.egg_info:egg_info +install = setuptools.command.install:install +install_egg_info = setuptools.command.install_egg_info:install_egg_info +install_lib = setuptools.command.install_lib:install_lib +install_scripts = setuptools.command.install_scripts:install_scripts +rotate = setuptools.command.rotate:rotate +saveopts = setuptools.command.saveopts:saveopts +sdist = setuptools.command.sdist:sdist +setopt = setuptools.command.setopt:setopt +test = setuptools.command.test:test +upload_docs = setuptools.command.upload_docs:upload_docs + +[distutils.setup_keywords] +dependency_links = setuptools.dist:assert_string_list +eager_resources = setuptools.dist:assert_string_list +entry_points = setuptools.dist:check_entry_points +exclude_package_data = setuptools.dist:check_package_data +extras_require = setuptools.dist:check_extras +include_package_data = setuptools.dist:assert_bool +install_requires = setuptools.dist:check_requirements +namespace_packages = setuptools.dist:check_nsp +package_data = setuptools.dist:check_package_data +packages = setuptools.dist:check_packages +python_requires = setuptools.dist:check_specifier +setup_requires = setuptools.dist:check_requirements +test_loader = setuptools.dist:check_importable +test_runner = setuptools.dist:check_importable +test_suite = setuptools.dist:check_test_suite +tests_require = setuptools.dist:check_requirements +use_2to3 = setuptools.dist:invalid_unless_false +zip_safe = setuptools.dist:assert_bool + +[egg_info.writers] +PKG-INFO = setuptools.command.egg_info:write_pkg_info +dependency_links.txt = setuptools.command.egg_info:overwrite_arg +depends.txt = setuptools.command.egg_info:warn_depends_obsolete +eager_resources.txt = setuptools.command.egg_info:overwrite_arg +entry_points.txt = setuptools.command.egg_info:write_entries +namespace_packages.txt = setuptools.command.egg_info:overwrite_arg +requires.txt = setuptools.command.egg_info:write_requirements +top_level.txt = setuptools.command.egg_info:write_toplevel_names + +[setuptools.finalize_distribution_options] +keywords = setuptools.dist:Distribution._finalize_setup_keywords +parent_finalize = setuptools.dist:_Distribution.finalize_options + diff --git a/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c467ac089a1caa6610d1379fe6a48132c15dcd5a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Tsuyoshi Hombashi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to 
deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..00c2945f053be786fb7e11a525ee5641ae3b3d94 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/METADATA @@ -0,0 +1,102 @@ +Metadata-Version: 2.1 +Name: tabledata +Version: 1.3.3 +Summary: tabledata is a Python library to represent tabular data. Used for pytablewriter/pytablereader/SimpleSQLite/etc. +Home-page: https://github.com/thombashi/tabledata +Author: Tsuyoshi Hombashi +Author-email: tsuyoshi.hombashi@gmail.com +License: MIT License +Project-URL: Changlog, https://github.com/thombashi/tabledata/releases +Project-URL: Documentation, https://tabledata.rtfd.io/ +Project-URL: Source, https://github.com/thombashi/tabledata +Project-URL: Tracker, https://github.com/thombashi/tabledata/issues +Keywords: table +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +License-File: LICENSE +Requires-Dist: DataProperty <2,>=1.0.1 +Requires-Dist: typepy <2,>=1.2.0 +Provides-Extra: logging +Requires-Dist: loguru <1,>=0.4.1 ; extra == 'logging' +Provides-Extra: test +Requires-Dist: pytablewriter >=0.46 ; extra == 'test' +Requires-Dist: pytest ; extra == 'test' + +.. contents:: **tabledata** + :backlinks: top + :depth: 2 + +Summary +--------- +`tabledata `__ is a Python library to represent tabular data. Used for pytablewriter/pytablereader/SimpleSQLite/etc. + +.. image:: https://badge.fury.io/py/tabledata.svg + :target: https://badge.fury.io/py/tabledata + :alt: PyPI package version + +.. 
image:: https://img.shields.io/pypi/pyversions/tabledata.svg + :target: https://pypi.org/project/tabledata + :alt: Supported Python versions + +.. image:: https://img.shields.io/pypi/implementation/tabledata.svg + :target: https://pypi.org/project/tabledata + :alt: Supported Python implementations + +.. image:: https://github.com/thombashi/tabledata/actions/workflows/ci.yml/badge.svg + :target: https://github.com/thombashi/tabledata/actions/workflows/ci.yml + :alt: Linux/macOS/Windows CI status + +.. image:: https://coveralls.io/repos/github/thombashi/tabledata/badge.svg?branch=master + :target: https://coveralls.io/github/thombashi/tabledata?branch=master + :alt: Test coverage + +Installation +============ + +Install from PyPI +------------------------------ +:: + + pip install tabledata + +Install from PPA (for Ubuntu) +------------------------------ +:: + + sudo add-apt-repository ppa:thombashi/ppa + sudo apt update + sudo apt install python3-tabledata + + +Dependencies +============ +- Python 3.7+ +- `Mandatory Python package dependencies (automatically installed) `__ + +Optional Python packages +------------------------------------------------ +- `loguru `__ + - Used for logging if the package installed +- `pandas `__ + - required to get table data as a pandas data frame + +Documentation +=============== +https://tabledata.rtfd.io/ + diff --git a/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..ef0f46d59e3fc059c82a7c256f4fc6d56d1c7378 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/RECORD @@ -0,0 +1,29 @@ +tabledata-1.3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tabledata-1.3.3.dist-info/LICENSE,sha256=vrvfBSShR_iaYV__U9eb3JDLx2MVUPtLclzT873NJPY,1074 +tabledata-1.3.3.dist-info/METADATA,sha256=IKxSJeg1Qrr6dSTCJdvnBIiKl6IKCa4aAIC_B4Ngwfg,3657 +tabledata-1.3.3.dist-info/RECORD,, +tabledata-1.3.3.dist-info/WHEEL,sha256=yQN5g4mg4AybRjkgi-9yy4iQEFibGQmlz78Pik5Or-A,92 +tabledata-1.3.3.dist-info/top_level.txt,sha256=wPYCjph2PxB5odPJWPADX_65iL1gAIjMQFlAyZi80iI,10 +tabledata/__init__.py,sha256=OkkMA83NWJOKsmUru4qWiUXrwTxF5jDhHXl_dR2zQBQ,683 +tabledata/__pycache__/__init__.cpython-310.pyc,, +tabledata/__pycache__/__version__.cpython-310.pyc,, +tabledata/__pycache__/_common.cpython-310.pyc,, +tabledata/__pycache__/_constant.cpython-310.pyc,, +tabledata/__pycache__/_converter.cpython-310.pyc,, +tabledata/__pycache__/_core.cpython-310.pyc,, +tabledata/__pycache__/error.cpython-310.pyc,, +tabledata/__pycache__/normalizer.cpython-310.pyc,, +tabledata/__version__.py,sha256=JC4TkyHfH-eP9nAvfI04H3gEbgfItYa1jLE09ARSNSc,201 +tabledata/_common.py,sha256=eB3xHflvbF5p5hz1f5D9xNHQCujy6Uk91NLPTy5fFHY,274 +tabledata/_constant.py,sha256=I763_Fx-9IT_ZQTTncxi04WsXd6tK78z2VBYZ3up5Aw,154 +tabledata/_converter.py,sha256=0H61eirjQw_rs0h1N_APtCthRRFbYkKZVUHK-5-0GAE,895 +tabledata/_core.py,sha256=4y0sLRCEcvjJvqi_pUlhz5qjIass_pZu5FcnK_kpr7U,14530 +tabledata/_logger/__init__.py,sha256=7rkhAj6PGbUI3fouTa7GEzjRelUFj0_UPfzkZ_Yk71g,55 +tabledata/_logger/__pycache__/__init__.cpython-310.pyc,, +tabledata/_logger/__pycache__/_logger.cpython-310.pyc,, +tabledata/_logger/__pycache__/_null_logger.cpython-310.pyc,, +tabledata/_logger/_logger.py,sha256=3HreG22mzHGZvexAGZpjkU4A995ZZmGJmiIkPcrkA4o,783 +tabledata/_logger/_null_logger.py,sha256=QJuaErUIV_x6NjQ9qNX9eNSi_GB_9CrO7lKeXYZnuaw,1088 
+tabledata/error.py,sha256=UGGJm3_9oLQi9GBWZz4cqp1dnzc5Kbu37c6CsiWozME,526 +tabledata/normalizer.py,sha256=lVz4agT8Bm97rvKUUUhP3OT1pGDsMczB5rAlx316XoY,6465 +tabledata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..7e688737d490be3643d705bc16b5a77f7bd567b7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.41.2) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..43aaf2f47a3d66da5f27d0be87b84007bd70669a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tabledata-1.3.3.dist-info/top_level.txt @@ -0,0 +1 @@ +tabledata diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_refs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fa571489bd7f3e752872eee4c7832704dfece39f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_refs/__init__.py @@ -0,0 +1,6443 @@ +import builtins +import collections +import inspect +import itertools +import math +import operator +import warnings + +from collections.abc import Iterable +from enum import Enum +from functools import partial, reduce, singledispatch, wraps +from typing import Any, Callable, Dict, List, Optional, overload, Sequence, Tuple, Union + +import torch + +import torch._prims as prims +import torch._prims_common as utils +from torch import sym_float, sym_int +from torch._prims_common import ( + DeviceLikeType, + Dim, + DimsSequenceType, + DimsType, + dtype_to_type, + ELEMENTWISE_TYPE_PROMOTION_KIND, + FloatLike, + FloatWithoutSymFloat, + IntLike, + is_weakly_lesser_type, + Number, + NumberType, + RealNumberType, + REDUCTION_OUTPUT_TYPE_KIND, + ShapeType, + StrideType, + TensorLike, + TensorLikeType, + TensorOrNumberLikeType, + TensorSequenceType, +) +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + _maybe_resize_out, + _safe_copy_out, + elementwise_type_promotion_wrapper, + elementwise_unary_scalar_wrapper, + out_wrapper, +) + +# Experimental module containing prototype Python references for existing +# PyTorch operations. 
+ +__all__ = [ + # + # Elementwise Unary References + # + "abs", + "acos", + "acosh", + "asinh", + "asin", + "atan", + "atanh", + "bitwise_not", + # "cbrt", # No corresponding torch operation + "ceil", + "conj_physical", + "cos", + "cosh", + "count_nonzero", + "deg2rad", + "digamma", + "erf", + "erfinv", + "erfc", + "exp", + "expm1", + "exponential", + "exp2", + "fill", + "fill_", + "floor", + "frac", + "geometric", + "index_add", + "index_copy", + "index_copy_", + "index_select", + "index_fill", + "index_fill_", + "isfinite", + "isinf", + "isposinf", + "isneginf", + "isnan", + "isreal", + "i0", + "lerp", + "lgamma", + "log", + "log1p", + "log2", + "log10", + "log_normal", + "log_softmax", + "mvlgamma", + "norm", + "normal", + "nan_to_num", + "neg", + "positive", + "rad2deg", + "reciprocal", + "round", # TODO: model kwargs + "sigmoid", + "sgn", + "sign", + "signbit", + "sin", + "sinc", + "sinh", + "softmax", + "sqrt", + "square", + "tan", + "tanh", + "trace", + "trunc", + # + # Elementwise Binary References + # + "add", + "atan2", + "bitwise_and", + "bitwise_left_shift", + "bitwise_or", + "bitwise_right_shift", + "bitwise_xor", + "clamp_min", + "clamp_max", + "copysign", + "div", + "eq", + "float_power", + "floor_divide", + "fmax", + "fmin", + "fmod", + "gcd", + "ge", + "gt", + "heaviside", + "hypot", + "igamma", + "igammac", + "imag", + "isclose", + "lcm", + # 'ldexp', + "le", + "logaddexp", + "logaddexp2", + "logical_and", + "logical_not", + "logical_or", + "logical_xor", + "logsumexp", + "lt", + # 'max', # implement with reductions + "maximum", + # 'min', # implement with reductions + "minimum", + "mul", + "ne", + "nextafter", + # 'polar', # abs, cos, sin + "pow", + "real", + "rpow", + "remainder", + "rsub", + "rtruediv", + "rfloordiv", + "sub", + "true_divide", + "trunc_divide", + "xlogy", + # + # Elementwise Ternary References + # + "addcdiv", + "addcmul", + "clamp", + # + # Conditional references + # + "masked_fill", + "masked_fill_", + "where", + # + # Data conversion and movement references + # + "clone", + "copy_to", # TODO: add OpInfo (or implement .to) + "item", + "to", + # + # Reduction ops + # + "all", + "amax", + "amin", + "any", + "cumsum", + "cumprod", + "mean", + "dot", + "vdot", + "std", + "std_mean", + "sum", + "sum_to_size", + "prod", + "var", + "var_mean", + # + # Linear algebra ops + # + "addr", + # + # View & Shape Ops + # + "alias", + "atleast_1d", + "atleast_2d", + "atleast_3d", + "as_strided", + "as_strided_scatter", + "block_diag", + "broadcast_shapes", + "broadcast_tensors", + "broadcast_to", + "cat", + "chunk", + "column_stack", + "conj", + "constant_pad_nd", + "contiguous", + "diag_embed", + "diag", + "diagonal", + "diagonal_copy", + "diagonal_scatter", + "dsplit", + "dstack", + "expand", + "expand_as", + "flatten", + "flip", + "fliplr", + "flipud", + "hsplit", + "hstack", + "meshgrid", + "movedim", + "narrow", + "narrow_copy", + "native_group_norm", + "native_layer_norm", + "permute", + "ravel", + "repeat", + "reshape", + "reshape_as", + "roll", + "rot90", + "rsqrt", + "stack", + "swap_axes", # alias for transpose + "squeeze", + "t", + "T", + "take_along_dim", + "tensor_split", + "transpose", + "unfold", + "unfold_copy", + "unsqueeze", + "view", + "view_as", + "vsplit", + "vstack", + "view_as_complex", + "unflatten", + "unbind", + "triu", + "tril", + "triu_indices", + "tril_indices", + # + # Tensor Creation + # + "arange", + "cauchy", + "empty", + "empty_like", + "empty_permuted", + "empty_strided", + "eye", + "full", + "full_like", + "linspace", + 
"logspace", + "new_empty", + "new_empty_strided", + "new_full", + "new_ones", + "new_zeros", + "ones", + "ones_like", + "randn", + "scalar_tensor", + "zero", + "zeros", + "zeros_like", + # + # Test-related functions + # + "allclose", + "equal", + # + # Statistical operations + # + "bucketize", + # + # Misc + # + "is_complex", + "renorm", + "stft", + "istft", +] + +Tensor = torch.Tensor +DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined] +aten = torch._ops.ops.aten + +# Note that the docstrings for the public methods from this file are in +# torch/_torch_docs.py + + +def is_noncontiguous_supported(device): + if device is not None and device.type == "hpu": + return False + return True + + +def handle_noncontiguous_outputs(input_tlist, output): + device = None + from torch._subclasses.fake_tensor import FakeTensor + + for t in input_tlist: + if isinstance(t, FakeTensor): + device = t.fake_device + break + + if not is_noncontiguous_supported(device): + output = output.contiguous() + + return output + + +def _broadcast_shapes(*_shapes): + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + shapes = tuple( + (x,) if isinstance(x, IntLike) else x + for x in filter(lambda x: x is not None, _shapes) + ) + + # Short-circuits on no input + if len(shapes) == 0: + return None + + # Type checking + # TODO: make common validations available as utils + for shape in shapes: + assert isinstance(shape, Sequence) + + # Computes common shape + common_shape = [ + 1, + ] * reduce(max, (len(shape) for shape in shapes)) + for arg_idx, shape in enumerate(shapes): + for idx in range(-1, -1 - len(shape), -1): + if guard_size_oblivious(common_shape[idx] == 1): + if shape[idx] < 0: + raise ValueError( + "Attempting to broadcast a dimension with negative length!" + ) + common_shape[idx] = shape[idx] + elif guard_size_oblivious(shape[idx] != 1): + if common_shape[idx] != shape[idx]: + raise RuntimeError( + f"Attempting to broadcast a dimension of length {shape[idx]} at {idx}! " + f"Mismatching argument at index {arg_idx} had {shape}; but expected shape " + f"should be broadcastable to {common_shape}" + ) + + return common_shape + + +def _maybe_broadcast(*args, preserve_cpu_scalar_tensors=True): + # Computes common shape + common_shape = _broadcast_shapes( + *(t.shape if isinstance(t, TensorLike) else None for t in args) + ) + + def __maybe_broadcast(x, shape): + if x is None: + return None + elif isinstance(x, Number): + return x + elif isinstance(x, TensorLike): + if preserve_cpu_scalar_tensors and utils.is_cpu_scalar_tensor(x): + return x + + if not utils.same_shape(x.shape, common_shape): + return x.expand(common_shape) + + return x + else: + raise RuntimeError( + "Unexpected type when broadcasting: " + str(type(x)) + "!" 
+ ) + + return tuple(__maybe_broadcast(x, common_shape) for x in args) + + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition + +# +# Elementwise unary references +# + +infer_aten_op = object() + + +# TODO: add type promotion support +def _make_elementwise_unary_reference( + type_promotion_kind, + *, + aten_op=infer_aten_op, + extra_meta=None, +) -> Callable: + def inner(prim: Callable): + nonlocal aten_op + + @wraps(prim) + @out_wrapper() + @elementwise_unary_scalar_wrapper + @elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=type_promotion_kind, + ) + def _ref(a: TensorLikeType) -> TensorLikeType: + if extra_meta is not None: + extra_meta(a) + + output = prim(a) + return handle_noncontiguous_outputs([a], output) + + if aten_op is infer_aten_op: + aten_op = utils.get_aten_op(prim, prim.__name__) + if aten_op is not None: + register_decomposition(aten_op)(_ref) + + return _ref + + return inner + + +def _make_alias(fn, name): + """ + This function defines an alias of another function and sets its __name__ argument. + It also sets its __module__ argument to the module of the caller. + Note that when naïvely doing `alias = fn`, we have that `alias.__name__ == "fn"`, and + `alias.__module__ == fn.__module__`. + """ + + def _fn(*args, **kwargs): + return fn(*args, **kwargs) + + _fn.__name__ = name + _fn.__module__ = inspect.currentframe().f_back.f_globals["__name__"] # type: ignore[union-attr] + return _fn + + +def _make_inplace(fn): + """ + Given a function with out variant (i.e. using `out_wrapper()), it returns its in-place variant + See https://github.com/pytorch/pytorch/wiki/Developer-FAQ#how-do-in-place-operations-work-in-pytorch + """ + + # nb. We use the name of the first argument used in the unary references + @wraps(fn) + def _fn(a, *args, **kwargs): + return fn(a, *args, out=a, **kwargs) + + inplace_name = f"{fn.__name__}_" + _fn.__name__ = inplace_name + _fn = register_decomposition(getattr(aten, inplace_name))(_fn) + + # We access the __all__ attribute of the module where fn is defined + # There may be a cleaner way of doing this... 
+ from inspect import getmodule + + _all = getmodule(fn).__all__ # type: ignore[union-attr] + if inplace_name not in _all: + _all.append(inplace_name) + return _fn + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT) +def abs(a): + return prims.abs(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def acos(a): + return prims.acos(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def acosh(a): + return prims.acosh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def asin(a): + return prims.asin(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def asinh(a): + return prims.asinh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def atan(a): + return prims.atan(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def atanh(a): + return prims.atanh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def bitwise_not(a): + return prims.bitwise_not(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def ceil(a): + return prims.ceil(a) + + +@register_decomposition(aten.is_complex) +def is_complex(input: TensorLikeType): + return utils.is_complex_dtype(input.dtype) + + +@register_decomposition(aten.conj_physical) +@out_wrapper() +def conj_physical(input: TensorLikeType): + if not utils.is_complex_dtype(input.dtype): + return input + return prims.conj_physical(input) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def cos(a): + return prims.cos(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def cosh(a): + return prims.cosh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def digamma(a): + return prims.digamma(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erf(a): + return prims.erf(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erfinv(a): + return prims.erf_inv(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def erfc(a): + return prims.erfc(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def exp(a): + return prims.exp(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def expm1(a): + return prims.expm1(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def exp2(a): + return prims.exp2(a) + + +# Fill has its own implementation because it has a value parameter +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a,"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def fill(a: TensorLikeType, value: NumberType) -> TensorLikeType: + assert isinstance(a, TensorLike) + assert isinstance(value, Number) + + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(value), python_type): + msg = f"value argument of type {type(value)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + + return prims.fill(a, value) + + +def fill_(a: TensorLikeType, value: NumberType) -> TensorLikeType: + r = prims.fill(a, value) + prims.copy_to(a, r) + return a + + +@register_decomposition(aten.zero) +@out_wrapper() +def zero(input: TensorLikeType) -> TensorLikeType: + return torch.zeros_like(input) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def floor(a): + return prims.floor(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def frac(x: TensorLikeType) -> TensorLikeType: + trunc_x = torch.mul(torch.floor(torch.abs(x)), torch.sign(x)) + return torch.sub(x, trunc_x) + + +# imag does not use _make_elementwise_unary_reference because it does not support out +def imag(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + torch._check( + utils.is_complex_dtype(a.dtype), lambda: "imag only supports complex tensors." + ) + return prims.imag(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + aten_op=None, # CompositeImplicitAutograd +) +def isfinite(a: TensorLikeType) -> TensorLikeType: + if utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype): + return prims.isfinite(a) + + return ones_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isinf(a: TensorLikeType) -> TensorLikeType: + if utils.is_complex_dtype(a.dtype): + return torch.logical_or(isinf(torch.real(a)), isinf(torch.imag(a))) + if utils.is_float_dtype(a.dtype): + return torch.abs(a) == float("inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isposinf(a: TensorLikeType) -> TensorLikeType: + torch._check( + not utils.is_complex_dtype(a.dtype), + lambda: f"Complex dtype is not supported for isposinf, got dtype {a.dtype}", + ) + if utils.is_float_dtype(a.dtype): + return a == float("inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isneginf(a: TensorLikeType) -> TensorLikeType: + torch._check( + not utils.is_complex_dtype(a.dtype), + lambda: f"Complex dtype is not supported for isneginf, got dtype {a.dtype}", + ) + if utils.is_float_dtype(a.dtype): + return a == float("-inf") + return torch.zeros_like(a, dtype=torch.bool) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def isnan(a: TensorLikeType) -> TensorLikeType: + return prims.ne(a, a) + + +# alias +mvlgamma = _make_alias(torch.special.multigammaln, "mvlgamma") # type: ignore[has-type] + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + aten_op=None, # CompositeImplicitAutograd +) +def isreal(a: TensorLikeType) -> TensorLikeType: + if utils.is_complex_dtype(a.dtype): + return torch.imag(a) == 0 + return torch.ones_like(a, dtype=torch.bool) + + +# TODO: if this is special maybe it should be defined there and imported here? 
+@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, aten_op=aten.i0 +) +def i0(a): + return prims.bessel_i0(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def lgamma(a): + return prims.lgamma(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log(a): + return prims.log(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log1p(a): + return prims.log1p(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log2(a): + return prims.log2(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def log10(a): + return prims.log10(a) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def log_softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + result_dtype = dtype or a.dtype + computation_dtype = utils.get_computation_dtype(result_dtype) + a_ = _maybe_convert_to_dtype(a, computation_dtype) + return _maybe_convert_to_dtype(a_ - logsumexp(a_, dim, keepdim=True), result_dtype) # type: ignore[return-value] + + +@register_decomposition(aten.logsumexp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def logsumexp( + self: TensorLikeType, dim: DimsType, keepdim: bool = False +) -> TensorLikeType: + if not isinstance(dim, Iterable): + dim = (dim,) + if self.numel() == 0: + return torch.sum(torch.exp(self), dim, keepdim).log() + maxes = torch.amax(self, dim, keepdim=True) + maxes = torch.masked_fill(maxes, maxes.abs() == float("inf"), 0) + maxes_squeezed = maxes if keepdim else torch.squeeze(maxes, dim) + result = torch.sum(torch.exp(self - maxes), dim, keepdim) + return result.log().add(maxes_squeezed) + + +@register_decomposition(aten.nan_to_num) +@out_wrapper() +def nan_to_num( + a: TensorLikeType, + nan: Optional[NumberType] = 0.0, + posinf: Optional[NumberType] = None, + neginf: Optional[NumberType] = None, +) -> TensorLikeType: + assert isinstance(a, TensorLike) + + if utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + return a.clone() + + if nan is None: + nan = 0.0 + + if posinf is None: + posinf = torch.finfo(a.dtype).max + + if neginf is None: + neginf = torch.finfo(a.dtype).min + + result = torch.where(torch.isnan(a), nan, a) # type: ignore[call-overload] + result = torch.where(torch.isneginf(a), neginf, result) # type: ignore[call-overload] + result = torch.where(torch.isposinf(a), posinf, result) # type: ignore[call-overload] + return result + + +def _neg_meta(a: TensorLikeType): + torch._check( + a.dtype is not torch.bool, + lambda: ( + "Negation, the `-` operator, on a bool tensor is not supported. " + "If you are trying to invert a mask, use the `~` or `logical_not()` " + "operator instead." + ), + ) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, extra_meta=_neg_meta +) +def neg(a): + return prims.neg(a) + + +# positive does not use _make_elementwise_unary_reference because it does not support out +# CompositeImplicitAutograd - don't register decomp +def positive(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + if a.dtype is torch.bool: + msg = "positive does not support bool tensors." 
+ raise RuntimeError(msg) + return a + + +# real does not use _make_elementwise_unary_reference because it does not support out +def real(a: TensorLikeType) -> TensorLikeType: + assert isinstance(a, TensorLike) + if utils.is_complex_dtype(a.dtype): + return prims.real(a) + return a + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def reciprocal(a): + return prims.reciprocal(a) + + +@register_decomposition(aten.round) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def round(a: TensorLikeType, *, decimals: int = 0) -> TensorLikeType: + if decimals == 0: + return prims.round(a) + else: + ten_pow = 10**decimals + ten_neg_pow = 10 ** (-decimals) + return prims.mul(prims.round(prims.mul(a, ten_pow)), ten_neg_pow) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def rsqrt(a): + return prims.rsqrt(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sigmoid(a: TensorLikeType) -> TensorLikeType: + return true_divide(1, add(1, exp(neg(a)))) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def sgn(a): + if utils.is_complex_dtype(a.dtype): + a_abs = a.abs() + return torch.where(a_abs == 0, 0, a / a_abs) + else: + return a.sign() + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def sign(a): + return prims.sign(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def signbit(a): + return prims.signbit(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sin(a): + return prims.sin(a) + + +# Autograd note: This will give the right first derivative at zero (by chance), +# but not the right second derivative +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sinc(a): + a = math.pi * a + return torch.where(a == 0, 1, torch.sin(a) / a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sinh(a): + return prims.sinh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def sqrt(a): + return prims.sqrt(a) + + +@_make_elementwise_unary_reference( + ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG, + aten_op=None, # CompositeImplicitAutograd, +) +def square(a: TensorLikeType) -> TensorLikeType: + return mul(a, a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def tan(a): + return prims.tan(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def tanh(a): + return prims.tanh(a) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) +def trunc(a): + return prims.trunc(a) + + +# TODO: register this as a real ref/decomposition once TorchInductor supports complex! 
+def view_as_complex(self: TensorLikeType) -> TensorLikeType: + input_dtype = self.dtype + torch._check( + utils.is_float_dtype(input_dtype), + lambda: f"view_as_complex is only supported for floating point" + f"tensors, but got a tensor of scalar type: {input_dtype}", + ) + sizes = self.size() + torch._check( + len(sizes) != 0, + lambda: "Input tensor must have one or more dimensions", + ) + torch._check( + sizes[-1] == 2, + lambda: "Tensor must have a last dimension of size 2", + ) + + old_strides = self.stride() + torch._check( + old_strides[-1] == 1, + lambda: "Tensor must have a last dimension with stride 1", + ) + dims = old_strides[:-1] + torch._check( + py_all(stride % 2 == 0 for stride in dims), + lambda: "Tensor must have a stride divisible by 2 for all but last dimension", + ) + torch._check( + self.storage_offset() % 2 == 0, + lambda: "Tensor must have a storage_offset divisible by 2", + ) + return prims.view_element_type( + self, utils.corresponding_complex_dtype(input_dtype) + ).squeeze(-1) + + +def _make_elementwise_binary_reference( + type_promotion_kind, + aten_op=infer_aten_op, + name=None, + has_out=True, + supports_lhs_python_scalar=True, + supports_rhs_python_scalar=True, + supports_two_python_scalars=False, + should_register_decomposition=True, +) -> Callable: + def inner(prim: Callable): + nonlocal aten_op, name + if name is None: + name = prim.__name__ + + @wraps(prim) + @elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=type_promotion_kind, + ) + def _ref( + a: Union[Tensor, NumberType], + b: Union[Tensor, NumberType], + ) -> Tensor: + torch._check_value( + supports_lhs_python_scalar or not isinstance(a, Number), + lambda: f"{name}: Received a lhs Python scalar to an elementwise binary " + "operation that does not accept lhs scalars!", + ) + torch._check_value( + supports_rhs_python_scalar or not isinstance(b, Number), + lambda: f"{name}: Received a rhs Python scalar to an elementwise binary " + "operation that does not accept rhs scalars!", + ) + torch._check_value( + supports_two_python_scalars + or not (isinstance(a, Number) and isinstance(b, Number)), + lambda: f"{name}: Receive two Number inputs to an elementwise binary operation!", + ) + a, b = _maybe_broadcast(a, b) + output = prim(a, b) + return handle_noncontiguous_outputs([a, b], output) + + if has_out: + _ref = out_wrapper()(_ref) + + _ref.__name__ = name + if aten_op is infer_aten_op: + aten_op = utils.get_aten_op(prim, name) + if aten_op is not None and should_register_decomposition: + register_decomposition(aten_op)(_ref) + + return _ref + + return inner + + +# Add has its own implementation because it has an alpha argument +@register_decomposition(aten.add) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def add( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + alpha: Optional[NumberType] = None, +): + """ + Reference implementation of torch.add + """ + + a, b = _maybe_broadcast(a, b) + + if alpha is not None: + dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr] + python_type = utils.dtype_to_type(dtype) + if python_type != bool and not utils.is_weakly_lesser_type( + type(alpha), python_type + ): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + if isinstance(b, TensorLike): + b = prims.mul(b, alpha) + else: + b = b * alpha + + output = prims.add(a, b) + return handle_noncontiguous_outputs([a, b], output) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def atan2(a, b): + return prims.atan2(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_and(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_and(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_left_shift(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.shift_left(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_or(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_or(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_right_shift(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.shift_right_arithmetic(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def bitwise_xor(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.bitwise_xor(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, +) +def copysign( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + if isinstance(b, Number) and isinstance(a, Tensor): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device: + msg = "Expected divisor (b) to be on the same device ({}) as dividend (a), but it is found on {}!".format( + a.device, b.device + ) + raise RuntimeError(msg) + return where(signbit(b), neg(abs(a)), abs(a)) + + +# complex = _make_elementwise_binary_reference(prims.complex, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) + + +@register_decomposition(aten.div) +@out_wrapper() +def div( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + rounding_mode: Optional[str] = None, +): + """ + Reference implementation of torch.div + """ + if rounding_mode is None: + return true_divide(a, b) + elif rounding_mode == "trunc": + return trunc_divide(a, b) + elif rounding_mode == "floor": + return floor_divide(a, b) + else: + msg = f"div expected rounding_mode to be one of None, 'trunc', or 'floor' but found {rounding_mode}." 
+ raise ValueError(msg) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def eq(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.eq(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG, +) +def pow( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], +) -> TensorLikeType: + assert isinstance(a, TensorLikeType) or isinstance(b, TensorLikeType) + + if isinstance(b, Number): + if b == 1.0: + return a.clone() # type: ignore[return-value,union-attr] + elif b == 2.0: + return a * a # type: ignore[return-value] + elif b == 0.5: + return torch.sqrt(a) # type: ignore[arg-type] + elif isinstance(a, Number): + if a == 1.0: + return torch.fill(b, True) + if a == 2.0 and ( + utils.is_float_dtype(b.dtype) or utils.is_complex_dtype(b.dtype) + ): + return torch.exp2(b) + + return prims.pow(a, b) + + +# Float power has its own implementation because it has unique type promotion. +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def float_power( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], +) -> Tensor: + if isinstance(a, Number) and isinstance(b, Number): + raise ValueError( + "Receive two Number inputs to an elementwise binary operation!" + ) + + # Handles type promotion + dtype = utils.get_higher_dtype(a, b) + assert dtype is not None + if utils.is_complex_dtype(dtype): + dtype = torch.complex128 + else: + dtype = torch.float64 + + # Float power has the following contiguous cast behavior to be + # consistent with its C++ impl + a = _maybe_convert_to_dtype(a, dtype) + b = _maybe_convert_to_dtype(b, dtype) + + a, b = _maybe_broadcast(a, b) + return pow(a, b) + + +# >>> a = torch.tensor(-0.2500, dtype=torch.float64) +# tensor(-0.250000000000000, dtype=torch.float64) +# +# >>> b = torch.tensor(-0.0010, dtype=torch.float64) +# tensor(-0.001000000000000, dtype=torch.float64) +# +# Note: In this case, casting float to double will expand the float mantissa with zeros, +# while creating a double generates a distinct mantissa. +# >>> torch.tensor(-0.001).to(dtype=torch.float64) +# tensor(-0.001000000047497, dtype=torch.float64) +# +# Floor Division +# The difference is caused because torch.remainder(a, b) = -0.001. +# +# >>> torch.floor(torch.true_divide(a, b)) +# tensor(250., dtype=torch.float64) +# +# >>> torch.div(a, b, rounding_mode='floor') +# tensor(249., dtype=torch.float64) +# +# Definition: a // b = (a - remainder(a, b)) / b +# >>> torch.true_divide(torch.sub(a, torch.remainder(a, b)), b) +# tensor(249., dtype=torch.float64) +# +# For reference, see CPython's implementation: +# https://github.com/python/cpython/blob/ace008c531dd685a30c1dd68f9b5ba35f20171cf/Objects/floatobject.c#L636 + + +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_two_python_scalars=True, + should_register_decomposition=False, +) +def floor_divide( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + # Wrap scalars because some references only accept tensor arguments. 
+ if isinstance(a, Number) and isinstance(b, Number): + a = scalar_tensor(a) + b = scalar_tensor(b) + elif isinstance(b, Number) and isinstance(a, Tensor): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + elif isinstance(a, Number) and isinstance(b, Tensor): + a = scalar_tensor(a, dtype=b.dtype, device=b.device) + elif isinstance(a, Tensor) and isinstance(b, Tensor) and a.device != b.device: + if a.device == torch.device("cpu"): + msg = "Expected divisor (b) to be on the same device ({}) as dividend (a), but it is found on {}!".format( + a.device, b.device + ) + raise RuntimeError(msg) + else: + b = prims.device_put(b, device=a.device) + + assert isinstance(a, Tensor) and isinstance(b, Tensor) + dtype = a.dtype + if utils.is_float_dtype(dtype): + return _floor_divide_float(a, b) + elif utils.is_integer_dtype(dtype): + return _floor_divide_integer(a, b) + else: + torch._check(False, lambda: f"{dtype} not supported for floor_divide") + + +def _floor_divide_integer(a: Tensor, b: Tensor) -> Tensor: + a, b = _maybe_broadcast(a, b) + + if not a.dtype.is_signed: + return prims.div(a, b) + + # Convert truncation to flooring: + offset = (torch.signbit(a) != torch.signbit(b)).logical_and(torch.fmod(a, b) != 0) + return prims.div(a, b) - _maybe_convert_to_dtype(offset, a.dtype) + + +def _floor_divide_float(a: Tensor, b: Tensor) -> Tensor: + mod = fmod(a, b) + div = true_divide(sub(a, mod), b) + + # Ensure that the remainder has the same sign as denominator + different_signed_inputs = bitwise_xor(lt(a, 0), lt(b, 0)) + non_zero_remainder = ne(mod, 0) + mask = bitwise_and(non_zero_remainder, different_signed_inputs) + div = where(mask, sub(div, 1), div) + + # Map quotient to nearest integer value + floor_div = floor(div) + mask = gt(sub(div, floor_div), 0.5) + floor_div = where(mask, add(floor_div, 1), floor_div) + + basic_div = true_divide(a, b) + zero_tensor = scalar_tensor(0, dtype=basic_div.dtype, device=basic_div.device) + + # If quotient is zero, copy signbit from true_divide quotient + floor_div = where(ne(div, 0), floor_div, copysign(zero_tensor, basic_div)) + + # If denominator is zero, then follow true_divide behavior + return where(ne(b, 0), floor_div, basic_div) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def fmax(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmax(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def fmin(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmin(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=True, +) +def fmod(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.fmod(a, b) + + +@register_decomposition(aten.frexp) +@out_wrapper("mantissa", "exponent") +def frexp(self: TensorLikeType) -> Tuple[TensorLikeType, TensorLikeType]: + return torch.return_types.frexp(prims.frexp(self)) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def gcd(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.gcd(a, b) + + +@_make_elementwise_binary_reference( + 
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def ge(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.ge(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def gt(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.gt(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def heaviside(input: TensorLikeType, values: TensorLikeType) -> TensorLikeType: + input_eq_zero = torch.eq(input, 0) + input_lt_zero = torch.logical_or(torch.lt(input, 0), torch.isnan(input)) + zeros_and_ones = torch.where(input_lt_zero, 0, 1) + output = torch.where(input_eq_zero, values, zeros_and_ones) + return output + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def hypot(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.hypot(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def igamma(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.igamma(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def igammac(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.igammac(a, b) + + +def _check_close_args( + name: str, + a: TensorLikeType, + b: TensorLikeType, + rtol: float, + atol: float, +) -> None: + torch._check_value( + a.dtype == b.dtype, + lambda: f"{name}: Attempting to compare tensors of different dtypes {a.dtype} and {b.dtype}!", + ) + torch._check( + rtol >= 0, + lambda: f"{name}: rtol must be greater than or equal to zero, but got {rtol}!", + ) + torch._check( + atol >= 0, + lambda: f"{name}: atol must be greater than or equal to zero, but got {atol}!", + ) + + +# CompositeImplicitAutograd - don't register decomp +def isclose( + a: TensorLikeType, + b: TensorLikeType, + rtol: float = 1e-05, + atol: float = 1e-08, + equal_nan: bool = False, +) -> TensorLikeType: + _check_close_args(name="torch.isclose", a=a, b=b, rtol=rtol, atol=atol) + + close = eq(a, b) + if equal_nan and (utils.is_float_dtype(a.dtype) or utils.is_complex_dtype(a.dtype)): + close = logical_or(close, logical_and(isnan(a), isnan(b))) + + # Note: In case of zero tolerances the closeness inequality degenerates to an equality check. + # In this case, the short-circuit prevents false positives as detailed in the paragraph below. + if atol == 0 and rtol == 0: + return close + + # Note [closeness error computation] + # atol and rtol are provided as doubles, so the computation + # rtol * other will produce a float or complex tensor. + # When the difference (self - other) is compared to it then the + # tensor representing the difference will also be cast to float or complex. + # However, since (self - other) in uint8 is very likely to produce a + # negative value, this moves the cast forward so the difference is + # always computed in a float or complex type. 
+ # If the values of the integer tensors cannot be exactly represented + # by the default scalar type then this may cause an incorrect result. + if not utils.is_float_dtype(a.dtype) and not utils.is_complex_dtype(a.dtype): + a = prims.convert_element_type(a, torch.get_default_dtype()) + b = prims.convert_element_type(b, torch.get_default_dtype()) + + allowed_error = add(atol, abs(mul(b, rtol))) + actual_error = abs(sub(a, b)) + + # Computes finite closeness + result = logical_or( + close, logical_and(isfinite(actual_error), le(actual_error, allowed_error)) + ) + + return result + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def lcm(a: TensorLikeType, b: TensorLikeType): + dtype = a.dtype + # promoting to int32 to maintain 100% consistency with C++ and to + # prevent overflow in case of int8 and int16 + promote_to_int = dtype in (torch.int8, torch.int16) + if promote_to_int: + a = prims.convert_element_type(a, torch.int32) + b = prims.convert_element_type(b, torch.int32) + + g = torch.gcd(a, b) + # Avoid division by zero in case gcd(0, 0) == 0 + g = torch.where(g == 0, 1, g) + res = torch.abs(prims.div(a, g) * b) + return res if not promote_to_int else prims.convert_element_type(res, dtype) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def le(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.le(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def logaddexp(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + # Nb. this implementation does not distribute the gradients evenly when a == b + mask = torch.real(a) >= torch.real(b) + max_ = torch.where(mask, a, b) + min_ = torch.where(mask, b, a) + inf_mask = torch.logical_and( + torch.logical_not(torch.isfinite(torch.real(a))), torch.real(a) == torch.real(b) + ) + if utils.is_complex_dtype(a.dtype) or utils.is_complex_dtype(b.dtype): + # are you wondering what this bunch of codes are for? edge cases! + neg_min_mask = torch.real(min_) < 0 + inf_vals = torch.where( + neg_min_mask, min_, torch.log(torch.exp(min_) + torch.exp(max_)) + ) + non_nan_vals = torch.where( + inf_mask, inf_vals, max_ + torch.log1p(torch.exp(min_ - max_)) + ) + # the type for full_like does not include tensor yet + nan_mask = torch.isnan(min_) + return torch.where(nan_mask, complex(float("nan"), float("nan")), non_nan_vals) # type: ignore[call-overload] + else: + return torch.where(inf_mask, a, max_ + torch.log1p(torch.exp(min_ - max_))) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def logaddexp2(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + torch._check( + not (utils.is_complex_dtype(a.dtype) or utils.is_complex_dtype(b.dtype)), + lambda: "logaddexp2 doesn't support complex dtypes", + ) + # Nb. 
this implementation does not distribute the gradients evenly when a == b + mask = a >= b + max_ = torch.where(mask, a, b) + min_ = torch.where(mask, b, a) + inf_mask = torch.logical_and(torch.isinf(a), a == b) + inv_log_2 = 1.0 / math.log(2) + result = max_ + torch.log1p(torch.exp2(min_ - max_)) * inv_log_2 + return torch.where(inf_mask, a, result) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_and(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return a & b + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL) +def logical_not(a: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + return a == 0 + return ~a + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_or(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return bitwise_or(a, b) + + +# TODO: skip unnecessary conversion of long to float +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, +) +def logical_xor(a: TensorLikeType, b: TensorLikeType): + if not utils.is_boolean_dtype(a.dtype): + a = a != 0 + if not utils.is_boolean_dtype(b.dtype): + b = b != 0 + return a ^ b + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def lt(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.lt(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def maximum(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.maximum(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def minimum(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.minimum(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + supports_two_python_scalars=True, +) +def mul(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.mul(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL, + supports_lhs_python_scalar=False, +) +def ne(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.ne(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, + supports_lhs_python_scalar=False, + supports_rhs_python_scalar=False, +) +def nextafter(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.nextafter(a, b) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def remainder(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.remainder(a, b) + + +# reverse sub +@register_decomposition(aten.rsub) +@out_wrapper() +def rsub( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + alpha: NumberType = 1, +): + if isinstance(a, Number): + msg = "Received a Number for the first argument, but expected a Tensor" + raise ValueError(msg) + + return torch.sub(b, a, alpha=alpha) + + +# TODO: consider refactoring this with add impl +# sub has its own implementation 
because it has an alpha argument +@register_decomposition(aten.sub) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def sub( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + *, + alpha: NumberType = 1, +): + """ + Reference implementation of torch.sub + """ + + a, b = _maybe_broadcast(a, b) + + if alpha != 1: + dtype = a.dtype if isinstance(a, TensorLike) else b.dtype # type: ignore[union-attr] + python_type = utils.dtype_to_type(dtype) + if not utils.is_weakly_lesser_type(type(alpha), python_type): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + if isinstance(b, torch.Tensor): + b = prims.mul(b, alpha) + else: + # Carefully not to use prims.mul if b is a scalar / symint. + # prims.mul always returns a tensor, + # which will mess with type promotion. + b = b * alpha + + output = prims.sub(a, b) + return handle_noncontiguous_outputs([a, b], output) + + +@_make_elementwise_binary_reference( + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, + name="true_divide", + aten_op=None, # CompositeImplicitAutograd + supports_two_python_scalars=True, +) +def true_divide(a: TensorLikeType, b: TensorLikeType) -> TensorLikeType: + return prims.div(a, b) + + +@register_decomposition(aten.xlogy) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def xlogy(a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType]): + torch._check( + isinstance(a, TensorLike) or isinstance(b, TensorLike), + lambda: 'Expected either argument a or b to be a Tensor"', + ) + + # Operations like eq and log do not handle scalar values, so we convert them to scalar_tensors. 
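+    # Hypothetical illustration (not part of the original code): a Python scalar
+    # operand is wrapped so that the eq/log calls below see two tensors, e.g.
+    #   >>> t = torch.tensor([1.0, 2.0])
+    #   >>> torch.xlogy(2, t)  # 2 becomes scalar_tensor(2, dtype=t.dtype, device=t.device)
+    #   tensor([0.0000, 1.3863])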
+ if isinstance(b, TensorLike) and isinstance(a, Number): + a = scalar_tensor(a, dtype=b.dtype, device=b.device) + elif isinstance(a, TensorLike) and isinstance(b, Number): + b = scalar_tensor(b, dtype=a.dtype, device=a.device) + + # mypy: expected "Tensor" + assert isinstance(a, TensorLike) + assert isinstance(b, TensorLike) + rhs = torch.where(torch.eq(a, 0), 0, torch.mul(a, torch.log(b))) + return torch.where(torch.isnan(b), float("nan"), rhs) + + +@_make_elementwise_binary_reference( + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, + aten_op=None, # CompositeImplicitAutograd + supports_two_python_scalars=True, +) +def trunc_divide( + a: Union[TensorLikeType, NumberType], b: Union[TensorLikeType, NumberType] +): + dtype = utils.get_dtype(a) + if utils.is_integer_dtype(dtype): + return prims.div(a, b) + + return trunc(prims.div(a, b)) + + +# +# Elementwise Ternary References +# + + +@register_decomposition(aten.addcdiv) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "tensor1", "tensor2"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def addcdiv( + self: TensorLikeType, + tensor1: TensorLikeType, + tensor2: TensorLikeType, + *, + value: NumberType = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.addcdiv + """ + if value is not None: + dtype = self.dtype # no scalars allowed, see add + python_type = utils.dtype_to_type(dtype) + torch._check_value( + utils.is_weakly_lesser_type(type(value), python_type), + lambda: f"value argument of type {type(value)} cannot be safely cast to type {python_type}!", + ) + + return self + value * tensor1 / tensor2 + + +@register_decomposition(aten.addcmul) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "tensor1", "tensor2"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def addcmul( + self: TensorLikeType, + tensor1: TensorLikeType, + tensor2: TensorLikeType, + *, + value: NumberType = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.addcmul + """ + if value is not None: + dtype = self.dtype # no scalars allowed, see add + python_type = utils.dtype_to_type(dtype) + torch._check_value( + utils.is_weakly_lesser_type(type(value), python_type), + lambda: f"value argument of type {type(value)} cannot be safely cast to type {python_type}!", + ) + + return self + value * tensor1 * tensor2 + + +@register_decomposition(aten.clamp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "min", "max"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def clamp( + a: TensorLikeType, + min: Optional[TensorOrNumberLikeType] = None, + max: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + # NOTE: grad behavior with implementation `where` is not consistent on `nan` + if min is None and max is None: + msg = "clamp called but both min and max are none!" + raise ValueError(msg) + if min is not None: + a_isnan = torch.isnan(a) + condition = torch.bitwise_or(torch.ge(a, min), a_isnan) # type: ignore[arg-type] + # we should also propagate `nan` coming from boundaries. However, that's + # not necessary since `ge` would already `False` when either operands has + # a `nan`. 
So this line below is redundant + # `condition = bitwise_and(condition, bitwise_not(isnan(min)))` + a = torch.where(condition, a, min) # type: ignore[arg-type] + if max is not None: + a_isnan = torch.isnan(a) + # same as above, no need to adjust `nan` from `max` + condition = torch.bitwise_or(torch.le(a, max), a_isnan) # type: ignore[arg-type] + a = torch.where(condition, a, max) # type: ignore[arg-type] + + return a + + +@register_decomposition(aten.clamp_min) +@out_wrapper() +def clamp_min( + self: TensorLikeType, + min: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + return torch.clamp(self, min=min) # type: ignore[arg-type] + + +@register_decomposition(aten.clamp_max) +@out_wrapper() +def clamp_max( + self: TensorLikeType, + max: Optional[TensorOrNumberLikeType] = None, +) -> TensorLikeType: + return torch.clamp(self, max=max) # type: ignore[arg-type] + + +# +# Conditional references +# + + +# https://pytorch.org/docs/stable/generated/torch.where.html +# TODO: implement alternate where +@register_decomposition(aten.where) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "b"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def where( + pred: Tensor, + a: Optional[TensorOrNumberLikeType] = None, + b: Optional[TensorOrNumberLikeType] = None, +): + """ """ + + if a is None or b is None: + raise NotImplementedError + + utils.check_same_device(pred, a, b, allow_cpu_scalar_tensors=True) + torch._check( + pred.dtype is torch.bool, + lambda: f"expected predicate to be bool, got {pred.dtype}", + ) + + pred, a, b = _maybe_broadcast(pred, a, b) + return prims.where(pred, a, b) + + +# +# Data Movement References +# +@register_decomposition(aten.clone) +@out_wrapper() +def clone( + a: TensorLikeType, *, memory_format: torch.memory_format = torch.preserve_format +) -> TensorLikeType: + result = prims.clone(a, memory_format=memory_format) + return result + + +def copy_to(a: Tensor, b: Tensor, *, allow_cross_device=True): + if not allow_cross_device and a.device != b.device: + msg = "Attempting to copy from device {} to device {}, but cross-device copies are not allowed!".format( + b.device, a.device + ) + raise RuntimeError(msg) + + return prims.copy_to(a, b) + + +@register_decomposition(aten.item) +def item(a: TensorLikeType) -> NumberType: + if a.numel() != 1: + msg = f"Can't convert a tensor with {a.numel()} elements to a number!" + raise ValueError(msg) + + # NOTE: explicit conversion is necessary for bool! + # See https://github.com/pytorch/pytorch/issues/78071 + number_type = utils.dtype_to_type(a.dtype) + return number_type(prims.item(a)) + + +# fast path when `to` returns an alias to input. 
This mimics the same function in aten +def _to_will_alias( + a: TensorLikeType, + device: Optional[DeviceLikeType] = None, + dtype: Optional[torch.dtype] = None, + copy: Optional[bool] = None, + layout: Optional[torch.layout] = None, + memory_format: Optional[torch.memory_format] = None, + pin_memory: Optional[bool] = False, + non_blocking: bool = False, # not using non_blocking +) -> bool: + return ( + not copy + and (device is None or a.device == device) + and (dtype is None or a.dtype == dtype) + and (layout is None or a.layout == layout) + # is_pinned issue #84925 + # and (pin_memory is None or pin_memory == a.is_pinned()) + and ( + memory_format is None + or memory_format == torch.preserve_format + or utils.is_contiguous_for_memory_format(a, memory_format=memory_format) + ) + ) + + +@singledispatch +def _to_dispatch(*args, **kwargs): + raise NotImplementedError + + +@_to_dispatch.register +def _to_device( + device: torch.device, + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "device": device, + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_device_str( + device: str, + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "device": torch.device(device), + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_dtype( + dtype: torch.dtype, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + kwargs = { + "dtype": dtype, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +@_to_dispatch.register +def _to_other( + other: Tensor, + non_blocking: bool = False, + copy: bool = False, + memory_format: Optional[torch.memory_format] = None, +) -> Dict[str, Any]: + device = other.device + dtype = other.dtype + layout = other.layout + # is_pinned issue #84925 + # pin_memory = other.is_pinned() + kwargs = { + "device": device, + "dtype": dtype, + "layout": layout, + "non_blocking": non_blocking, + "copy": copy, + "memory_format": memory_format, + } + return kwargs + + +# remove to_kwargs that is already present in `a` +def _canonicalize_to_arguments(a: Tensor, to_kwargs: dict): + options_to_check = ["dtype", "device", "layout", "memory_format"] + # "device" option could be passed a str instead torch.device + if "device" in to_kwargs and isinstance(to_kwargs["device"], str): + to_kwargs["device"] = torch.device(to_kwargs["device"]) + + for kw in options_to_check: + if kw in to_kwargs: + if ( + (kw == "memory_format" and to_kwargs[kw] is torch.preserve_format) + or ( + kw == "device" + and to_kwargs[kw].type == a.device.type + and ( + not to_kwargs[kw].index or to_kwargs[kw].index == a.device.index + ) + ) + or ( + getattr(a, kw, None) == to_kwargs[kw] + ) # this also handles {"memory_format": None} + ): + to_kwargs.pop(kw) + + +def to(a: TensorLikeType, *args, **kwargs) -> TensorLikeType: + # handled dispatch via positional arguments + if len(args) != 0: + kwargs = _to_dispatch(*args, **kwargs) + + # TODO: is_pinned is not currently supported in refs or fake_tensor + # https://github.com/pytorch/pytorch/issues/84925 + assert "pin_memory" not 
in kwargs + _canonicalize_to_arguments(a, kwargs) + + if _to_will_alias(a, **kwargs): + return a + + copy = kwargs.pop("copy") if "copy" in kwargs else False + non_blocking = kwargs.pop("non_blocking") if "non_blocking" in kwargs else False + + # short-circuit to `prims.convert_element_type` when `to` is just a dtype change + if ( + (copy or (kwargs.get("dtype", a.dtype) != a.dtype)) + and (not non_blocking) + and ("memory_format" not in kwargs) + and ("device" not in kwargs) + and ("layout" not in kwargs) + # is_pinned issue #84925 + # and ("pin_memory" not in kwargs) + ): + return prims.convert_element_type(a, kwargs.get("dtype", a.dtype)) + + result = torch.empty_like(a, **kwargs) + # TODO: non_blocking should be handled by `copy_to` + copy_to(result, a) + return result + + +# +# Reduction references +# + + +def _reduction( + a: TensorLikeType, + prim: Callable, + *, + has_identity: bool = True, + accepts_dim_tuple: bool = True, # to handle min/argmin that accept single dim only + dims: Optional[DimsType] = None, + keepdims: bool = False, + dtype: Optional[torch.dtype] = None, # should be specified for ops that support it + out: Optional[Tensor] = None, + output_dtype_kind: REDUCTION_OUTPUT_TYPE_KIND, +) -> TensorLikeType: # it is usually SAME, but I want + # ref writers to actually think about what to put here + assert isinstance(a, TensorLike) + if a.ndim > 64: + raise RuntimeError( + f"Received a tensor with {a.ndim} dimensions, but only tensors with up to 64 dims are supported!" + ) + + if out is not None: + assert isinstance(out, TensorLike) + if dtype is not None: + # TODO - this is true for eager mode currently, but it's wrong behavior for complex norms + if dtype != out.dtype: + raise RuntimeError( + "dtype argument and out dtype must match in reduction" + ) + if not accepts_dim_tuple: + assert dims is None or isinstance(dims, Dim) + if isinstance(dims, Dim): + dims = (dims,) # type: ignore[assignment] + dims = utils.reduction_dims(a.shape, dims) + if not has_identity: + valid_shape = a.ndim == 0 or py_all(a.shape[i] for i in dims) + if not valid_shape: + raise RuntimeError( + "reducing over zero-size dimension for reduction operation without identity" + ) + computation_dtype, result_dtype = utils.reduction_dtypes( + a, output_dtype_kind, dtype + ) + a = _maybe_convert_to_dtype(a, computation_dtype) # type: ignore[method-assign] + result = prim(a, dims) + if keepdims: + output_shape = [a.shape[i] if i not in dims else 1 for i in range(a.ndim)] + broadcast_dims = [i for i in range(a.ndim) if i not in dims] + result = prims.broadcast_in_dim(result, output_shape, broadcast_dims) + + if out is not None: + assert result_dtype is not None + if dtype is not None and result_dtype != out.dtype: + raise RuntimeError( + "Expected the dtype of reduction result and out to match" + ) + out = _maybe_resize_out(out, result.shape) + return _safe_copy_out(copy_from=result, copy_to=out) # type: ignore[arg-type] + + if result.dtype != result_dtype and result_dtype is not None: + result = prims.convert_element_type(result, result_dtype) + + return result + + +def _make_copy_from_view(fn): + """ + Given a view function (e.g. torch.diagonal) generates its copy variant (e.g. 
torch.diagonal_copy) + """ + name = fn.__name__ + fn = out_wrapper()(fn) + + def _fn(*args, out=None, **kwargs): + result = fn(*args, out=out, **kwargs) + if out is None: + return result.clone(memory_format=torch.contiguous_format) + return result + + copy_name = f"{name}_copy" + _fn.__name__ = copy_name + _fn = register_decomposition(getattr(aten, copy_name))(_fn) + return _fn + + +# Saves Python all +py_all = all + + +@register_decomposition(aten.all) +@out_wrapper() +def all( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, +) -> TensorLikeType: + result = torch.logical_not(torch.any(torch.logical_not(a), dim, keepdim=keepdim)) + + if a.dtype == torch.uint8: + result = result.to(dtype=torch.uint8) + + return result + + +# Saves Python any +py_any = any + + +@register_decomposition(aten.any) +@out_wrapper() +def any( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, +) -> TensorLikeType: + a_ = _maybe_convert_to_dtype(a, torch.bool) + if isinstance(dim, (list, tuple)) and len(dim) == 0: + result = a_.clone() + else: + result = a_.sum(dim=dim, keepdim=keepdim).ne(False) + + # Preserves uint8 -- probably a legacy mask thing + if a.dtype is torch.uint8: + return prims.convert_element_type(result, torch.uint8) + + return result + + +@register_decomposition([aten.sum.dim_IntList, aten.sum.IntList_out]) +def sum( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + if dtype is None: + if out is not None: + dtype = out.dtype + elif utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + dtype = torch.int64 + else: + dtype = a.dtype + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + return _reduction( + a, + prims.sum, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=out, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +def sum_to_size( + a: Tensor, + *shape, +) -> Tensor: + shape = utils.extract_shape_from_varargs(shape, validate=False) + torch._check( + utils.is_expandable_to(shape, a.shape), + lambda: f'sum_to_size: size "{shape}" is not expandable to size "{a.shape}"', + ) + # In ATen scalar tensors are sent through sum and the result is returned as + # type promoted + if utils.is_same_shape(shape, a.shape) and len(shape) > 0: + return prims.view_of(a) + leading_dims = a.ndim - len(shape) + reduce_dims = tuple(range(leading_dims)) + tuple( + i + for i in range(leading_dims, len(shape)) + if shape[i - leading_dims] == 1 and a.shape[i] != 1 + ) + return torch.sum(a, dim=reduce_dims, keepdim=True, dtype=None) + + +@register_decomposition(aten.prod) +def prod( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + keepdim: bool = False, + *, + dtype=None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + if dtype is None: + if out is not None: + dtype = out.dtype + elif utils.is_boolean_dtype(a.dtype) or utils.is_integer_dtype(a.dtype): + dtype = torch.int64 + else: + dtype = a.dtype + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + return _reduction( + a, + prims.prod, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=out, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +@register_decomposition(aten.amin) +def amin( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + out: 
Optional[Tensor] = None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + return _reduction( + a, + prims.amin, + dims=dim, + keepdims=keepdim, + dtype=None, + out=out, + has_identity=False, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +@register_decomposition(aten.amax) +def amax( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + out: Optional[Tensor] = None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + return _reduction( + a, + prims.amax, + dims=dim, + keepdims=keepdim, + dtype=None, + out=out, + has_identity=False, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.SAME, + ) + + +def _dim_var_dispatch(dim=None, unbiased=None): + # There's the following overload of torch.var: + # var(Tensor self, bool unbiased=True) -> (Tensor, Tensor) + # We need to explicitly convert bool dims to unbiased arg + if unbiased is None and isinstance(dim, bool): + unbiased = dim + dim = None + return dim, unbiased + + +@register_decomposition(aten.var) +@out_wrapper() +def var( + a: TensorLikeType, + dim: Optional[DimsType] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +) -> TensorLikeType: + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + + result = _reduction( + a, + partial(prims.var, correction=correction), + dims=dim, + keepdims=keepdim, + dtype=None, + out=None, + has_identity=True, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, + ) + return result + + +@register_decomposition(aten.std) +@out_wrapper() +def std( + a: TensorLikeType, + dim: Union[Optional[int], Optional[List[int]]] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +) -> TensorLikeType: + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + + opmath_dtype, dtype = utils.reduction_dtypes( + a, REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT + ) + a = _maybe_convert_to_dtype(a, opmath_dtype) + a_var = torch.var(a, dim, correction=correction, keepdim=keepdim) + a_std = torch.sqrt(a_var) + assert dtype is not None + return _maybe_convert_to_dtype(a_std, dtype) + + +@register_decomposition(aten.mean) +def mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype=None, + out=None, +) -> TensorLikeType: + # reduces over all dimensions if dim=() is passed + if dim == () or dim == []: + dim = None + orig_dtype = dtype + if dtype is None: + dtype = a.dtype + # can't use out wrapper because of this argument + torch._check( + out is None or out.dtype == dtype, + lambda: f"Expected out tensor to have dtype {dtype}, but got {out.dtype} instead", + ) + result = _reduction( + a, + prims.sum, + dims=dim, + keepdims=keepdim, + dtype=dtype, + out=None, + output_dtype_kind=REDUCTION_OUTPUT_TYPE_KIND.KEEP_PROMOTED_TYPE, + ) + torch._check( + utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), + lambda: ( + f"mean(): could not infer output dtype. " + f"{'Input' if orig_dtype is None else 'Optional'} dtype must be either " + f"a floating point or complex dtype. 
Got: {dtype}" + ), + ) + if isinstance(dim, Dim): + dim = (dim,) # type: ignore[assignment] + dims = utils.reduction_dims(a.shape, dim) # type: ignore[arg-type] + nelem = 1 if a.ndim == 0 else reduce(operator.mul, (a.shape[i] for i in dims), 1) + result = true_divide(result, nelem) + result_dtype = a.dtype if dtype is None else dtype + result = _maybe_convert_to_dtype(result, result_dtype) # type: ignore[method-assign] + if out is not None: + assert isinstance(out, TensorLike) + out = _maybe_resize_out(out, result.shape) + return _safe_copy_out(copy_from=result, copy_to=out) # type: ignore[arg-type] + return result + + +@register_decomposition(aten.std_mean) +@out_wrapper("out0", "out1") +def std_mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + *, + unbiased: Optional[bool] = None, + keepdim: bool = False, + correction: Optional[NumberType] = None, +): + dim, unbiased = _dim_var_dispatch(dim, unbiased) + correction = utils.set_correction(unbiased, correction) + opmath_dtype, dtype = utils.reduction_dtypes( + a, REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT + ) + original_dtype = a.dtype + a = _maybe_convert_to_dtype(a, opmath_dtype) + a_var, a_mean = torch.var_mean(a, dim, correction=correction, keepdim=keepdim) + a_std = torch.sqrt(a_var) + assert dtype is not None + return ( + _maybe_convert_to_dtype(a_std, dtype), + _maybe_convert_to_dtype(a_mean, original_dtype), + ) + + +@register_decomposition(aten.var_mean) +@out_wrapper("out0", "out1") +def var_mean( + a: TensorLikeType, + dim: Optional[DimsType] = None, + unbiased: Optional[bool] = None, + keepdim: bool = False, + *, + correction: Optional[NumberType] = None, +): + dim, unbiased = _dim_var_dispatch(dim, unbiased) + v = var(a, dim, unbiased, keepdim, correction=correction) + m = mean(a, dim, keepdim) + return v, m + + +@register_decomposition(aten.addr) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "vec1", "vec2"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def addr( + self: TensorLikeType, + vec1: TensorLikeType, + vec2: TensorLikeType, + *, + beta: NumberType = 1, + alpha: NumberType = 1, +) -> TensorLikeType: + torch._check( + vec1.ndim == 1, + lambda: f"addr: Expected 1-D argument vec1, but got {vec1.ndim}-D", + ) + torch._check( + vec2.ndim == 1, + lambda: f"addr: Expected 1-D argument vec2, but got {vec2.ndim}-D", + ) + self = self.expand(vec1.shape[0], vec2.shape[0]) + if utils.is_boolean_dtype(self.dtype): + # Integers are accepted for booleans + torch._check( + is_weakly_lesser_type(type(beta), int), + lambda: f"expected bool/int beta but got {type(beta)}", + ) + torch._check( + is_weakly_lesser_type(type(alpha), int), + lambda: f"expected bool/int alpha but got {type(beta)}", + ) + if not beta: + return torch.outer(vec1, vec2) if alpha else torch.full_like(self, False) + else: + return torch.logical_or( + self, + torch.outer(vec1, vec2) if alpha else torch.full_like(self, False), + ) + else: + torch._check( + is_weakly_lesser_type(type(beta), dtype_to_type(self.dtype)), + lambda: f"cannot safely convert {type(beta)} to {self.dtype}", + ) + torch._check( + is_weakly_lesser_type(type(alpha), dtype_to_type(self.dtype)), + lambda: f"cannot safely convert {type(alpha)} to {self.dtype}", + ) + if beta == 0: + # This means NaNs from self are dropped if beta is zero + return alpha * torch.outer(vec1, vec2) + else: + return beta * self + alpha * torch.outer(vec1, vec2) + + +# CompositeImplicitAutograd - don't register decomp +def atleast_1d( + arg: 
Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_1d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + res = tuple(a if a.ndim >= 1 else unsqueeze(a, 0) for a in args_) + return res if len(res) > 1 else res[0] + + +# Helper function with assert to avoid MyPy error +# of incompatible type passed to unsqueeze +def _unsqueeze_atleast( + at_least_fn: Callable, dim: int, arg: TensorLikeType +) -> TensorLikeType: + arg_ = at_least_fn(arg) + assert isinstance(arg_, TensorLike) + return unsqueeze(arg_, dim) + + +# CompositeImplicitAutograd - don't register decomp +def atleast_2d( + arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_2d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + unsqueeze_atleast_1d = partial(_unsqueeze_atleast, atleast_1d, 0) + res = tuple(a if a.ndim >= 2 else unsqueeze_atleast_1d(a) for a in args_) + return res if len(res) > 1 else res[0] + + +# CompositeImplicitAutograd - don't register decomp +def atleast_3d( + arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType +) -> Union[TensorLikeType, Tuple[TensorLikeType, ...]]: + """Reference implementation of :func:`torch.atleast_3d`.""" + if not args and isinstance(arg, collections.abc.Sequence): + args_ = arg + else: + assert not isinstance(arg, collections.abc.Sequence) + args_ = (arg,) + args + unsqueeze_atleast_2d = partial(_unsqueeze_atleast, atleast_2d, -1) + res = tuple(a if a.ndim >= 3 else unsqueeze_atleast_2d(a) for a in args_) + return res if len(res) > 1 else res[0] + + +def as_strided( + a: TensorLikeType, + size: ShapeType, + stride: StrideType, + storage_offset: Optional[int] = None, +) -> TensorLikeType: + storage_offset_int = ( + storage_offset if storage_offset is not None else a.storage_offset() + ) + return prims.as_strided(a, size, stride, storage_offset_int) + + +@register_decomposition(aten.as_strided_scatter) +@out_wrapper() +def as_strided_scatter( + input: TensorLikeType, + src: TensorLikeType, + size: ShapeType, + stride: StrideType, + storage_offset: Optional[int] = None, +) -> TensorLikeType: + storage_offset_int = 0 if storage_offset is None else storage_offset + return prims.as_strided_scatter(input, src, size, stride, storage_offset_int) + + +def broadcast_shapes(*shapes) -> ShapeType: + return torch.Size(_broadcast_shapes(*shapes)) + + +@aten.broadcast_tensors.default.py_impl(DispatchKey.CompositeImplicitAutograd) +@aten.broadcast_tensors.default.py_impl(DispatchKey.Meta) +def broadcast_tensors(*tensors) -> List[TensorLikeType]: + if len(tensors) == 1 and not isinstance(tensors[0], Tensor): + tensors = tensors[0] + return list(_maybe_broadcast(*tensors, preserve_cpu_scalar_tensors=False)) + + +# CompositeImplicitAutograd - don't register decomp +def broadcast_to(a: TensorLikeType, size: ShapeType) -> TensorLikeType: + start = len(size) - len(a.shape) + dims = tuple(range(start, len(a.shape) + start)) + return prims.broadcast_in_dim(a, size, dims) + + +@register_decomposition(aten.cat) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("tensors",), + 
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH, +) +def cat(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType: + def cat_compute_output_memory_format(inputs): + format = None + for t in inputs: + f = utils.suggest_memory_format(t) + if f == torch.contiguous_format: + return f + if format is not None and format != f: + return torch.contiguous_format + format = f + assert format is not None + return format + + if len(tensors) == 0: + msg = "cat expects at least one tensor, but received zero!" + raise ValueError(msg) + + for tensor in tensors: + assert isinstance(tensor, TensorLike) + + utils.check_same_device(*tensors, allow_cpu_scalar_tensors=False) + + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + # This is a bit tricky. Naively, you would expect to just pick one + # arbitrary tensor and check that all tensors match this tensor. However, + # there is legacy behavior which says that if you have a 1-D empty tensor + # (0,), this is permissible. So you can't assume that all the tensors + # have same dimensionality, and you can't assume that the first tensor is + # the correct stencil. + # + # We'll implement this in a few passes. First, we will try to infer the + # ndim of the cat output. If this ndim != 1, then we know that all ndim = + # 1 inputs must be empty, or are errors. If this ndim == 1, then life + # is easy (the legacy special case coincides with regular handling). + # + # NB: The regular implementation of cat just filters out empty inputs, + # but we do it slightly different here for better handling for unbacked + # SymInts + + example = None + for i, t in enumerate(tensors): + if example is None: + if t.ndim != 1: + example = t + else: + if t.ndim != 1: + torch._check( + t.ndim == example.ndim, + lambda: "Number of dimensions of tensors must match. " + f"Expected {example.ndim}-D tensors, but got {t.ndim}-D for " + f"tensor number {i} in the list", + ) + + if example is None: + # example is None if everything is 1-D. If so, just arbitrarily pick + # the first one + example = tensors[0] + + shape = example.shape + filtered = [] + for tensor_idx, tensor in enumerate(tensors): + if len(shape) != len(tensor.shape): + assert tensor.ndim == 1 # we've already checked this above + # Don't suggest the legacy behavior in the error message + torch._check( + tensor.shape[0] == 0, + lambda: f"Number of dimensions of tensors must match. 
" + f"Expected {example.ndim}-D tensors, but got 1-D for " + f"tensor number {tensor_idx} in the list", + ) + else: + # Remove inputs that are 1-D, zero size + if tensor.ndim == 1 and guard_size_oblivious(tensor.shape[0] == 0): + continue + # Don't bother checking size match, prims.cat will handle it + filtered.append(tensor) + + memory_format = cat_compute_output_memory_format(tensors) + + if len(filtered) == 0: + t = tensors[0] + + # TODO: fix this to work with meta tensors + try: + requires_grad = any(x.requires_grad for x in tensors) + except Exception: + requires_grad = False + + return empty( + (0,), + dtype=t.dtype, + device=t.device, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + dim = utils.canonicalize_dim(filtered[0].ndim, dim) + utils.validate_idx(filtered[0].ndim, dim) + + return prims.cat(filtered, dim).clone(memory_format=memory_format) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def column_stack(tensors: TensorSequenceType) -> TensorLikeType: + aligned_tensors = tuple( + x if x.ndim > 1 else x.reshape((x.numel(), 1)) for x in tensors + ) + return cat(aligned_tensors, 1) + + +def conj(input: TensorLikeType) -> TensorLikeType: + if not utils.is_complex_dtype(input.dtype): + return input + if input.is_sparse: + return torch.conj_physical(input) + return prims.conj(input) + + +# This replicates at::constant_pad_nd, defined in ATen/native/PadNd.cpp +@register_decomposition(aten.constant_pad_nd) +@out_wrapper() +def constant_pad_nd( + input: TensorLikeType, pad: List[int], value: NumberType = 0 +) -> TensorLikeType: + torch._check( + len(pad) % 2 == 0, + lambda: f"Length of pad must be even but instead it equals {len(pad)}", + ) + + input_sizes = input.shape + l_inp = len(input_sizes) + + l_pad = len(pad) // 2 + l_diff = l_inp - l_pad + + torch._check( + l_inp >= l_pad, + lambda: "Length of pad should be no more than twice the number of " + f"dimensions of the input. Pad length is {len(pad)} while the input has " + f"{l_inp} dimensions.", + ) + + c_input = input + for i in range(l_diff, l_inp): + pad_idx = 2 * (l_inp - i - 1) + if pad[pad_idx] < 0: + c_input = c_input.narrow(i, -pad[pad_idx], c_input.shape[i] + pad[pad_idx]) + + if pad[pad_idx + 1] < 0: + c_input = c_input.narrow(i, 0, c_input.shape[i] + pad[pad_idx + 1]) + + # if none of the pads are positive we can just return the result + if builtins.all(p <= 0 for p in pad): + return c_input.clone() + + new_shape = list(input_sizes[:l_diff]) + + for i in range(l_pad): + pad_idx = len(pad) - ((i + 1) * 2) + new_dim = input_sizes[l_diff + i] + pad[pad_idx] + pad[pad_idx + 1] + torch._check( + new_dim > 0, + lambda: f"The input size {input_sizes[l_diff + i]}, plus negative padding " + f"{pad[pad_idx]} and {pad[pad_idx + 1]} resulted in a negative output size, " + f"which is invalid. 
Check dimension {l_diff + i} of your input.", + ) + new_shape.append(new_dim) + + memory_format = utils.suggest_memory_format(input) + output = torch.empty( + new_shape, + dtype=input.dtype, + device=input.device, + requires_grad=input.requires_grad, + memory_format=memory_format, + ) + + if value == 0 and input.dtype == torch.bool: + value = False + # torch.fill isn't typed to allow complex values + output = torch.fill(output, value) # type: ignore[arg-type] + + c_output = output + for i in range(l_diff, l_inp): + pad_idx = 2 * (l_inp - i - 1) + if pad[pad_idx] > 0: + c_output = c_output.narrow( + i, pad[pad_idx], c_output.shape[i] - pad[pad_idx] + ) + if pad[pad_idx + 1] > 0: + c_output = c_output.narrow(i, 0, c_output.shape[i] - pad[pad_idx + 1]) + + prims.copy_to(c_output, c_input) + return output + + +def contiguous( + a: Tensor, *, memory_format: torch.memory_format = torch.contiguous_format +) -> Tensor: + torch._check( + memory_format != torch.preserve_format, + lambda: "preserve memory format is unsupported by the contiguous operator", + ) + + if utils.is_contiguous_for_memory_format(a, memory_format=memory_format): + return a + + return torch.clone(a, memory_format=memory_format) + + +@out_wrapper() +def dstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "dstack expects a non-empty TensorList") + aligned_tensors = atleast_3d(*tensors) + return cat(aligned_tensors, 2) + + +@register_decomposition(aten.expand) +def expand(a: Tensor, *shape) -> Tensor: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + # NOTE: cannot use utils.extract_shape_from_varargs here + # because that also validates the shape, but the shape + # given to expand may be "invalid" + if len(shape) == 1 and isinstance(shape[0], Sequence): + shape = tuple(shape[0]) + + torch._check( + len(shape) >= len(a.shape), + lambda: "expand: the requested shape has too few dimensions!", + ) + + offset = len(shape) - len(a.shape) + shape_ = list(shape) + for idx, x in enumerate(a.shape): + offset_idx = idx + offset + requested_length = shape[offset_idx] + torch._check( + guard_size_oblivious(requested_length == x) + or guard_size_oblivious(x == 1) + or requested_length == -1, + lambda: f"expand: attempting to expand a dimension of length {x}!", + ) + + shape_[offset_idx] = requested_length if requested_length != -1 else x + + # At this point shape must be valid + utils.validate_shape(shape_) + + return prims.broadcast_in_dim( + a, shape_, tuple(range(offset, len(a.shape) + offset)) + ) + + +# CompositeImplicitAutograd - don't register decomp +def expand_as(a: Tensor, b: Tensor) -> Tensor: + return a.expand(b.shape) + + +def chunk(a: TensorLikeType, chunks: int, dim: int = 0) -> Tuple[TensorLikeType, ...]: + if chunks <= 0: + msg = f"Expected at least one chunk, but got {chunks}!" 
+ raise ValueError(msg) + + dim = utils.canonicalize_dim(a.ndim, dim) + length = a.shape[dim] + chunk_size = math.ceil(length / chunks) + full_chunks = math.floor(length / chunk_size) + tail_chunk_size = length % chunk_size + + result = [] + for i in range(full_chunks): + result.append(narrow(a, dim, i * chunk_size, chunk_size)) + + if tail_chunk_size != 0: + result.append(narrow(a, dim, full_chunks * chunk_size, tail_chunk_size)) + + return tuple(result) + + +# Note: flatten, unlike other shape operators, returns the input tensor on a no-op (unless +# a 0D tensor is flattened, in which case it's returned in 1D) +# CompositeImplicitAutograd - don't register decomp +def flatten(a: TensorLikeType, start_dim: int = 0, end_dim: int = -1) -> TensorLikeType: + start_dim = utils.canonicalize_dim(a.ndim, start_dim) + end_dim = utils.canonicalize_dim(a.ndim, end_dim) + + # Short-circuits on no-op + if start_dim == end_dim and a.ndim != 0: + return a + + # Tries to take a view + # TODO: we could look at directing collapse_view to skip its meta function here (unsafe_collapse_view) + new_shape, new_strides = prims._collapse_view_helper(a, start_dim, end_dim) + if new_shape is not None: + return prims.collapse_view(a, start_dim, end_dim) + + # Makes a copy if it can't make a view + return prims.collapse(a, start_dim, end_dim) + + +@register_decomposition(aten.flip) +@out_wrapper() +def flip(a: TensorLikeType, dims: DimsSequenceType) -> TensorLikeType: + if not isinstance(dims, tuple) and not isinstance(dims, list): + raise ValueError("dims has to be a sequence of ints") + dims = utils.canonicalize_dims(a.ndim, dims) # type: ignore[assignment] + utils.validate_no_repeating_dims(dims) + return prims.rev(a, dims) + + +# CompositeImplicitAutograd - don't register decomp +def fliplr(a: TensorLikeType) -> TensorLikeType: + if a.ndim < 2: + raise RuntimeError("Input must be >= 2-d.") + + return flip(a, (1,)) + + +# CompositeImplicitAutograd - don't register decomp +def flipud(a: TensorLikeType) -> TensorLikeType: + if a.ndim < 1: + raise RuntimeError("Input must be >= 1-d.") + + return flip(a, (0,)) + + +# CompositeImplicitAutograd - don't register decomp +def narrow( + a: TensorLikeType, dim: int, start: Union[int, TensorLikeType], length: int +) -> TensorLikeType: + # Supports Tensor overload that was added for XLA: + # https://github.com/pytorch/pytorch/issues/31558 + if isinstance(start, TensorLike): + torch._check( + start.dim() == 0 and utils.is_integer_dtype(start.dtype), + lambda: "start must be an 0-dim integral Tensor.", + ) + start = start.item() # type: ignore[assignment] + torch._check(a.dim() > 0, lambda: "narrow() cannot be applied to a 0-dim tensor.") + torch._check(length >= 0, lambda: "narrow(): length must be non-negative.") + dim = utils.canonicalize_dim(a.ndim, dim) + dim_length = a.size(dim) + torch._check_with( + IndexError, + -dim_length <= start and start <= dim_length, # type: ignore[arg-type] + lambda: f"start out of range (expected to be in range of [{-dim_length}, {dim_length}], but got {start})", + ) + if start < 0: + start = start + dim_length + torch._check( + start <= dim_length - length, # type: ignore[arg-type] + lambda: f"start ({start}) + length ({length}) exceeds dimension size ({dim_length}).", + ) + return prims.slice_in_dim(a, start, start + length, axis=dim) + + +# TODO: This must return a sparse tensor if the input is sparse, but refs have +# no sparse support. See narrow_copy_sparse in core. 
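+# Illustrative note (added sketch, not from the original source): the generated
+# copy variant is expected to behave like the view op followed by a contiguous
+# clone, e.g. narrow_copy(x, 0, 0, 2) should roughly match
+# x.narrow(0, 0, 2).clone(memory_format=torch.contiguous_format) when no out= is given.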
+narrow_copy = _make_copy_from_view(narrow) + + +def _normalize( + a: Tensor, norm_dims: DimsType, eps: float +) -> Tuple[Tensor, Tensor, Tensor]: + """Computes mean and 1/std of a tensor along norm_dims. + + Used as a helper function for normalization layers. + + Args: + a (Tensor): input tensor + norm_dims (DimsType): dimensions to normalize over + eps (float): epsilon for numerical stability + + Returns: + out (Tensor): normalized tensor. + mean (Tensor): mean of the tensor along norm_dims. + rstd (Tensor): 1/std of the tensor along norm_dims. + """ + norm_dims = utils.canonicalize_dims(a.ndim, norm_dims) + computation_dtype = utils.get_computation_dtype(a.dtype) + a_acc = _maybe_convert_to_dtype(a, computation_dtype) + assert isinstance(a_acc, TensorLike) # to avoid mypy error for var_mean + biased_var, mean = torch.var_mean( + a_acc, dim=norm_dims, unbiased=False, keepdim=True + ) + rstd = torch.rsqrt(biased_var + eps) + out = (a - mean) * rstd + return out, mean, rstd + + +# add all specified dimensions +def _unsqueeze_multiple(x: TensorLikeType, dimensions: List[int]) -> TensorLikeType: + for dim in sorted(dimensions): + x = torch.unsqueeze(x, dim) + return x + + +@register_decomposition(aten.native_group_norm.default) +def native_group_norm( + input: Tensor, + weight: Optional[Tensor], + bias: Optional[Tensor], + batch_size: int, + num_channels: int, + flattened_inner_size: int, + num_groups: int, + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + torch._check( + input.ndim >= 2, + lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}", + ) + torch._check( + num_channels % num_groups == 0, + lambda: "Expected number of channels in input to be divisible by num_groups, " + + f"but got input of shape {input.shape} and num_groups = {num_groups}", + ) + + # num_channels / num_groups and flattened inner dimension are the reduction axes + reduction_dims = [2, 3] + input_reshaped = torch.reshape( + input, + [batch_size, num_groups, num_channels // num_groups, flattened_inner_size], + ) + out, mean, rstd = _normalize(input_reshaped, reduction_dims, eps) + out = out.view(input.shape) + + broadcast_dims = [0] + list(range(2, input.ndim)) + unsqueeze_bias = None + if bias is not None: + unsqueeze_bias = _unsqueeze_multiple(bias, broadcast_dims) + unsqueeze_weight = None + if weight is not None: + unsqueeze_weight = _unsqueeze_multiple(weight, broadcast_dims) + + if unsqueeze_weight is not None: + out = out * unsqueeze_weight + if unsqueeze_bias is not None: + out = out + unsqueeze_bias + + out = _maybe_convert_to_dtype(out, input.dtype) # type: ignore[assignment] + mean = _maybe_convert_to_dtype(mean, input.dtype) # type: ignore[assignment] + rstd = _maybe_convert_to_dtype(rstd, input.dtype) # type: ignore[assignment] + + # remove broadcast dimensions from mean and rstd + mean = torch.squeeze(mean, reduction_dims) + rstd = torch.squeeze(rstd, reduction_dims) + return (out, mean, rstd) + + +@register_decomposition(aten.native_layer_norm) +@out_wrapper("out0", "out1", "out2") +def native_layer_norm( + input: Tensor, + normalized_shape: ShapeType, + weight: Optional[Tensor], + bias: Optional[Tensor], + eps: float, +) -> Tuple[Tensor, Tensor, Tensor]: + normalized_ndim = len(normalized_shape) + torch._check( + normalized_ndim >= 1, + lambda: "Expected normalized_shape to be at least 1-dimensional, i.e., " + + "containing at least one element, but got normalized_shape = " + + str(normalized_shape), + ) + # torch.Size([1, 2, 3]) == [1, 2, 3] evaluates to False + # 
while torch.Size([1, 2, 3]) == (1, 2, 3) is True + # therefore we use tuple(normalized_shape) + torch._check( + weight is None or weight.shape == tuple(normalized_shape), + lambda: "Expected weight to be of same shape as normalized_shape, but got " + + "weight of shape " + + str(weight.shape) # type: ignore[union-attr] + + " and normalized_shape = " + + str(normalized_shape), + ) + torch._check( + bias is None or bias.shape == tuple(normalized_shape), + lambda: "Expected bias to be of same shape as normalized_shape, but got " + + "bias of shape " + + str(bias.shape) # type: ignore[union-attr] + + " and normalized_shape = " + + str(normalized_shape), + ) + torch._check( + input.ndim >= normalized_ndim + and input.shape[(input.ndim - normalized_ndim) :] == tuple(normalized_shape), + lambda: "Given normalized_shape=" + + str(normalized_shape) + + ", expected input with shape " + + str(normalized_shape) + + ", but got input of size " + + str(input.shape), + ) + + input = input.contiguous() + if weight is not None: + weight = weight.contiguous() + if bias is not None: + bias = bias.contiguous() + + axis = input.ndim - normalized_ndim + reduction_dims = list(range(axis, input.ndim)) + out, mean, rstd = _normalize(input, reduction_dims, eps) + + if weight is None and bias is not None: + out = out + bias + elif weight is not None and bias is None: + out = out * weight + elif weight is not None and bias is not None: + out = out * weight + bias + + out = _maybe_convert_to_dtype(out, input.dtype) # type: ignore[assignment] + if input.device.type == "cpu": + mean = _maybe_convert_to_dtype(mean, input.dtype) # type: ignore[assignment] + rstd = _maybe_convert_to_dtype(rstd, input.dtype) # type: ignore[assignment] + return (out, mean, rstd) + + +# TODO: Adding this as a meta function causes functorch tests to fail when compiled with debug mode. 
+# test/test_eager_transforms.py::TestFunctionalizeCPU::test_functionalize_fx_transpose_simple_cpu +@register_decomposition(aten.permute) +def permute(a: TensorLikeType, *dims) -> TensorLikeType: + _permutation = utils.canonicalize_dims( + a.ndim, utils.extract_dims_from_varargs(dims) + ) + return prims.transpose(a, _permutation) + + +@register_decomposition(aten.renorm) +@out_wrapper() +def renorm( + input: TensorLikeType, p: RealNumberType, dim: int, maxnorm: RealNumberType +) -> TensorLikeType: + torch._check(not isinstance(p, complex), lambda: "renorm: p must be real-valued") + torch._check(p > 0, lambda: "renorm: non-positive norm not supported") + torch._check( + not isinstance(maxnorm, complex), lambda: "renorm: maxnorm must be real-valued" + ) + torch._check( + maxnorm >= 0, lambda: f"renorm: expected maxnorm to be >= 0 but got {maxnorm}" + ) + ndim = input.ndim + torch._check( + ndim > 1, + lambda: f"renorm: input needs at least 2 dimensions, got {ndim} dimensions", + ) + + dim = utils.canonicalize_dim(ndim, dim) + reduce_dims = list(range(ndim)) + del reduce_dims[dim] + + # For half and bfloat16, calculate norm in float precision then cast + # normalization factor to half + acc_type = utils.get_computation_dtype(input.dtype) + if acc_type != input.dtype: + norm = torch.linalg.vector_norm( + input, p, reduce_dims, keepdim=True, dtype=acc_type + ) + else: + norm = torch.linalg.vector_norm(input, p, reduce_dims, keepdim=True) + + eps = 1e-7 + norm_factor = torch.where(norm > maxnorm, maxnorm / (norm + eps), 1.0) + if acc_type != input.dtype: + norm_factor = prims.convert_element_type(norm_factor, input.dtype) + return (input * norm_factor).contiguous() + + +# CompositeImplicitAutograd - don't register decomp +@aten.stft.center.py_impl(DispatchKey.CompositeImplicitAutograd) +def stft( + input: Tensor, + n_fft: int, + hop_length: Optional[int] = None, + win_length: Optional[int] = None, + window: Optional[Tensor] = None, + center: bool = True, + pad_mode: str = "reflect", + normalized: bool = False, + onesided: Optional[bool] = None, + return_complex: Optional[bool] = None, +) -> Tensor: + torch._check( + window is None or window.device == input.device, + lambda: ( + f"stft input and window must be on the same device but got self on {input.device}" + + f" and window on {window.device}" # type: ignore[union-attr] + ), + ) + + hop_length_ = hop_length if hop_length is not None else n_fft // 4 + win_length_ = win_length if win_length is not None else n_fft + + if return_complex is None: + return_complex_ = input.is_complex() or ( + window is not None and utils.is_complex_dtype(window.dtype) + ) + torch._check( + return_complex_, + ( + "stft requires the return_complex parameter be given for real inputs, " + + "and will further require that return_complex=True in a future PyTorch release." 
+            ),
+        )
+    else:
+        return_complex_ = return_complex
+
+    torch._check(
+        utils.is_float_dtype(input.dtype) or utils.is_complex_dtype(input.dtype),
+        lambda: "stft expected a tensor of floating point or complex values",
+    )
+    torch._check(1 <= input.ndim <= 2, lambda: "stft expected a 1D or 2D tensor")
+
+    original_ndim = input.ndim
+    if original_ndim == 1:
+        input = input.unsqueeze(0)
+
+    if center:
+        extra_dims = 3 - input.ndim
+        pad_amount = n_fft // 2
+        extended_shape = [*itertools.repeat(1, extra_dims), *input.shape]
+        input = aten.pad(input.view(extended_shape), [pad_amount, pad_amount], pad_mode)
+        input = input.view(input.size()[extra_dims:])
+
+    batch = input.size(0)
+    length = input.size(1)
+    torch._check(
+        0 < n_fft <= length,
+        lambda: f"stft expected 0 < n_fft <= {length}, but got n_fft={n_fft}",
+    )
+    torch._check(
+        hop_length_ > 0,
+        lambda: f"stft expected hop_length > 0 but got hop_length={hop_length_}",
+    )
+    torch._check(
+        0 < win_length_ <= n_fft,
+        lambda: f"stft expected 0 < win_length <= n_fft but got win_length={win_length_}",
+    )
+    torch._check(
+        window is None or window.shape == (win_length_,),
+        lambda: (
+            f"expected a 1D window tensor of size equal to win_length={win_length_}, "
+            + f"but got window with size {window.shape}"  # type: ignore[union-attr]
+        ),
+    )
+
+    if win_length_ < n_fft:
+        if window is None:
+            window = torch.ones(win_length_, dtype=input.dtype, device=input.device)
+        left = (n_fft - win_length_) // 2
+        window = aten.constant_pad_nd(window, [left, n_fft - win_length_ - left])
+
+    input = input.unfold(dimension=-1, size=n_fft, step=hop_length_)
+    if window is not None:
+        input = input * window
+
+    complex_fft = utils.is_complex_dtype(input.dtype)
+    onesided = onesided if onesided is not None else not complex_fft
+    norm = "ortho" if normalized else None
+    if onesided:
+        torch._check(
+            not complex_fft,
+            lambda: "Cannot have onesided output if window or input is complex",
+        )
+        out = torch.fft.rfft(input, dim=-1, norm=norm)
+    else:
+        out = torch.fft.fft(input, dim=-1, norm=norm)
+
+    out.transpose_(1, 2)
+
+    if original_ndim == 1:
+        out = out.squeeze_(0)
+
+    return out if return_complex_ else torch.view_as_real(out)
+
+
+# CompositeImplicitAutograd - don't register decomp
+@aten.istft.default.py_impl(DispatchKey.CompositeImplicitAutograd)
+def istft(
+    input: Tensor,
+    n_fft: int,
+    hop_length: Optional[int] = None,
+    win_length: Optional[int] = None,
+    window: Optional[Tensor] = None,
+    center: bool = True,
+    normalized: bool = False,
+    onesided: Optional[bool] = None,
+    length: Optional[int] = None,
+    return_complex=False,
+) -> Tensor:
+    torch._check(
+        window is None or window.device == input.device,
+        lambda: (
+            f"istft input and window must be on the same device but got self on {input.device}"
+            + f" and window on {window.device}"  # type: ignore[union-attr]
+        ),
+    )
+
+    hop_length_ = hop_length if hop_length is not None else n_fft // 4
+    win_length_ = win_length if win_length is not None else n_fft
+
+    torch._check(
+        utils.is_complex_dtype(input.dtype),
+        lambda: (
+            "istft requires a complex-valued input tensor matching the output "
+            + "from stft with return_complex=True"
+        ),
+    )
+    n_frames = input.size(-1)
+    fft_size = input.size(-2)
+
+    expected_output_signal_len = n_fft + hop_length_ * (n_frames - 1)
+    torch._check(input.numel() > 0, lambda: "istft input tensor cannot be empty")
+    torch._check(
+        2 <= input.ndim <= 3,
+        lambda: f"istft expected a tensor with 2 or 3 dimensions, but got {input.ndim}",
+    )
+    onesided_ = onesided if onesided is not None else fft_size != n_fft
+
+    if onesided_:
+        torch._check(
+            n_fft // 2 + 1 == fft_size,
+            lambda: (
+                "istft expected the frequency dimension (3rd to the last) of the input tensor "
+                + f"to match n_fft / 2 + 1 when onesided=True, but got {fft_size}"
+            ),
+        )
+    else:
+        torch._check(
+            n_fft == fft_size,
+            lambda: (
+                "istft expected the frequency dimension (3rd to the last) of the input tensor "
+                + f"to match n_fft when onesided=False, but got {fft_size}"
+            ),
+        )
+
+    torch._check(
+        0 < hop_length_ <= win_length_,
+        lambda: "istft expected 0 < hop_length <= win_length",
+    )
+    torch._check(
+        0 < win_length_ <= n_fft, lambda: "istft expected 0 < win_length <= n_fft"
+    )
+    torch._check(
+        window is None or window.shape == (win_length_,),
+        lambda: "Invalid window shape. window has to be 1D and length of `win_length`",
+    )
+
+    if window is None:
+        real_dtype = utils.corresponding_real_dtype(input.dtype)
+        window_ = torch.ones(win_length_, dtype=real_dtype, device=input.device)
+    else:
+        window_ = window
+
+    if win_length_ != n_fft:
+        left = (n_fft - win_length_) // 2
+        window_ = aten.constant_pad_nd(window_, (left, n_fft - win_length_ - left), 0)
+
+    original_ndim = input.ndim
+    if input.ndim == 2:
+        input = input.unsqueeze(0)
+
+    input = input.transpose(1, 2)
+    norm = "ortho" if normalized else None
+    if return_complex:
+        torch._check(
+            not onesided_,
+            lambda: "cannot have onesided output if window or input is complex",
+        )
+        input = torch.fft.ifft(input, dim=-1, norm=norm)
+    else:
+        torch._check(
+            window is None or not utils.is_complex_dtype(window.dtype),
+            lambda: "Complex windows are incompatible with return_complex=False",
+        )
+        if not onesided_:
+            input = input.narrow(dim=-1, start=0, length=n_fft // 2 + 1)
+        input = torch.fft.irfft(input, dim=-1, norm=norm)
+
+    assert input.size(2) == n_fft
+
+    y_tmp = input * window_.view([1, 1, n_fft])
+    y = aten.unfold_backward(
+        y_tmp,
+        input_sizes=(y_tmp.size(0), expected_output_signal_len),
+        dim=1,
+        size=n_fft,
+        step=hop_length_,
+    )
+    window_envelop = aten.unfold_backward(
+        window_.pow(2).expand((1, n_frames, n_fft)),
+        input_sizes=(y_tmp.size(0), expected_output_signal_len),
+        dim=1,
+        size=n_fft,
+        step=hop_length_,
+    )
+
+    assert expected_output_signal_len == y.size(1)
+    assert expected_output_signal_len == window_envelop.size(1)
+
+    start = n_fft // 2 if center else 0
+    if length is not None:
+        end = start + length
+    elif center:
+        end = expected_output_signal_len - n_fft // 2
+    else:
+        end = expected_output_signal_len
+
+    length = max(0, end - start)
+    y = y.narrow(dim=1, start=start, length=length)
+    window_envelop = window_envelop.narrow(dim=1, start=start, length=length)
+
+    window_envelop_lowest = window_envelop.abs().min().lt(1e-11)
+    torch._check(
+        not window_envelop_lowest.item(),
+        lambda: "window overlap add min less than 1e-11",
+    )
+
+    y = y / window_envelop
+    if original_ndim == 2:
+        y = y.squeeze(0)
+
+    if end > expected_output_signal_len:
+        warnings.warn(
+            "The length of signal is shorter than the length parameter. Result is being "
+            + "padded with zeros in the tail. 
Please check your center and hop_length settings" + ) + y = aten.constant_pad_nd(y, (0, end - expected_output_signal_len), 0) + return y + + +# Get the new shape and stride after applying unfold to an input tensor +def _get_unfold_shape_stride( + a_shape: ShapeType, a_stride: StrideType, dimension: int, size: int, step: int +): + a_ndim = len(a_shape) + dim = utils.canonicalize_dim(a_ndim, dimension, wrap_scalar=True) + max_size = 1 if a_ndim == 0 else a_shape[dim] + last_stride = 1 if a_ndim == 0 else a_stride[dim] + + torch._check( + size <= max_size, + lambda: f"Maximum size for tensor at dimension {dim} is {max_size} but size is {size}", + ) + + torch._check( + step > 0, + lambda: f"Step is {step} but must be > 0", + ) + + shape = list(a_shape) + strides = list(a_stride) + shape.append(size) + strides.append(last_stride) + if dim < a_ndim: + shape[dim] = (shape[dim] - size) // step + 1 + strides[dim] *= step + return shape, strides + + +@register_decomposition(aten.repeat) +@out_wrapper() +def repeat(a: Tensor, *repeat_shape) -> Tensor: + repeat_shape = utils.extract_shape_from_varargs(repeat_shape, validate=False) + torch._check( + len(repeat_shape) >= len(a.shape), + lambda: "repeat: Number of dimensions of repeat dims can not be smaller than number of dimensions of tensor", + ) + + if len(repeat_shape) == 0: + return torch.clone(a) + + num_new_dimensions = len(repeat_shape) - a.ndim + padded_shape = [1] * num_new_dimensions + for dim_size in a.shape: + padded_shape.append(dim_size) + + target_shape = tuple( + padded_size * repeat_size + for padded_size, repeat_size in zip(padded_shape, repeat_shape) + ) + + # return an empty tensor if one of the repeat_shape dimensions is zero + if 0 in repeat_shape: + return torch.empty( + target_shape, + dtype=a.dtype, + device=a.device, + requires_grad=a.requires_grad, + memory_format=utils.suggest_memory_format(a), + ) + + urtensor_shape = target_shape + urtensor_stride = utils.make_contiguous_strides_for(target_shape) + for dim, dim_size in enumerate(padded_shape): + # repeat each dimension by using unfold_copy operation + urtensor_shape, urtensor_stride = _get_unfold_shape_stride( + urtensor_shape, urtensor_stride, dim, dim_size, max(dim_size, 1) + ) + + # derive permute order by sorting urtensor strides + enumerated_stride = list(enumerate(urtensor_stride)) + enumerated_stride.sort(key=lambda item: item[1], reverse=True) + permute_order, sorted_stride = zip(*enumerated_stride) + + # add new and expand dimensions according to urtensor + repeat_xtensor = a.expand(urtensor_shape) + + # clone tensor to concretize expanded dimensions + cloned_result = torch.clone(repeat_xtensor) + + # transpose axis so strides are in sorted order + permuted_result = cloned_result.permute(permute_order) + + # reshape to get contiguous tensor with correct target shape + return permuted_result.reshape(target_shape) + + +def _reshape_view_helper(a: TensorLikeType, *shape, allow_copy: bool) -> TensorLikeType: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious, sym_eq + + # Creates a valid shape + shape = utils.extract_shape_from_varargs(shape, validate=False) + # Reshape may be given a shape with a -1 length + # This indicates that the dimension's length should be inferred + shape = utils.infer_size(shape, a.numel()) + + # Short-circuits if shape is the same + if guard_size_oblivious(sym_eq(tuple(a.shape), tuple(shape))): + return prims.view_of(a) + + # Special-cases tensors with no elements + if guard_size_oblivious(a.numel() == 0): + return 
as_strided(a, shape, utils.make_contiguous_strides_for(shape)) + + # Special-cases reshaping zero dim tensors + if a.ndim == 0: + _a = a + for length in shape: + assert length == 1 + _a = unsqueeze(_a, -1) + return _a + + # Special-cases reshaping to zero dim tensors + if len(shape) == 0: + _a = a + for length in a.shape: + assert length == 1 + _a = squeeze(_a, -1) + return _a + + # Handles general case: a 1+D tensor reshaped into a distinct 1+D shape + + # NOTE [Reshape Algorithm] + # This algorithm works by attempting to greedily construct the desired dimensions in + # the output shape, left to right. It does this by, conceptually, accumulating + # dimensions of the original tensor, also left to right, until the dimension + # can be constructed using prims.split_dim. + # The algorithm also has special handling for tail squeezes/unsqueezes, like + # if a reshape from (5, 5) to (5, 5, 1) or vice versa. + # + # This algorithm does not flatten the original tensor and then split dims as appropriate + # because that would create copies more often than this algorithm. flatten is the only + # operation below which can create a view or a copy, and while it prefers creating + # views it may sometimes create a copy if the tensor's strides do not permit a view. + # As a result, this algorithm tries to minimize flattening. + # + # Note that a better version of this algorithm may exist. Regions which could be + # flattened without creating a copy can be identified in advance, and that might + # allow fewer flatten calls or faster short-circuiting to make a copy. + idx = 0 + a_ = a + for length in shape: + # Handles tail unsqueezes + if idx >= a_.ndim: + assert length == 1 + last_dim = a_.ndim - 1 + # NOTE: using split_dim instead of unsqueeze may seem silly here, + # but it's necessary to get the strides correct + a_ = prims.split_dim(a_, last_dim, a_.shape[last_dim]) + idx = idx + 1 + continue + + # Skips dimensions that are already the correct length + if guard_size_oblivious(length == a_.shape[idx]): + idx = idx + 1 + continue + + # Gathers enough original dimensions such that this new dimension can be created + # Note that this accumulation will terminate because we've verified a and the shape + # specify the same number of elements above + accum = a_.shape[idx] + end = idx + while guard_size_oblivious(accum % length != 0): + end = end + 1 + accum = accum * a_.shape[end] + if end != idx: + # NOTE: in this case multiple dimensions must be flatten to create the desired dimension + # This flattening is why reshape sometimes creates a copy -- because flattening + # may return a view of a copy + + # Checks if collapse can be a view and short-circuits to copying reshape if it can't + new_shape, new_strides = prims._collapse_view_helper(a_, idx, end) + if new_shape is None: + if allow_copy: + return prims.reshape(a, shape) + + msg = "Cannot view a tensor with shape {} and strides {} as a tensor with shape {}!".format( + a.shape, a.stride(), shape + ) + raise ValueError(msg) + + a_ = flatten(a_, idx, end) + + # Splits the (possibly flattened) dimension to create the desired dim length + if guard_size_oblivious(accum != length): + a_ = prims.split_dim(a_, idx, length) + + idx = idx + 1 + + # Squeezes tail + while idx < a_.ndim: + assert a_.shape[idx] == 1 + a_ = squeeze(a_, idx) + + return a_ + + +# CompositeImplicitAutograd - don't register decomp +# NOTE: shape is a vararg because Tensor.reshape can be called with as +# Tensor.reshape(a, b, c) or Tensor.reshape((a, b, c)) Function call +# torch.reshape 
doesn't support unpacked shapes +def reshape(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType: + return _reshape_view_helper(a, *shape, allow_copy=True) + + +# CompositeImplicitAutograd - don't register decomp +def reshape_as(self: TensorLikeType, other: TensorLikeType) -> TensorLikeType: + return self.reshape(other.size()) + + +@register_decomposition(aten.roll) +@out_wrapper() +def roll( + a: TensorLikeType, shifts: DimsType, dims: DimsType = tuple() +) -> TensorLikeType: + """Reference implementation of :func:`torch.roll`.""" + dims = utils.canonicalize_dims(a.ndim, dims) + # ATen specifies int[1] type for shifts and dims which expands integers to tuples of length 1 + if not isinstance(shifts, Iterable): + shifts = (shifts,) + if not isinstance(dims, Iterable): + dims = (dims,) + + # Avoid modulo by zero + if a.numel() == 0: + # Keeping this as ref for now as FakeTensor runs into some issues with complex tensors + return a.clone() + + if a.dim() == 0 and len(dims) > 0: + raise IndexError( + f"Dimension specified as {dims[0]} but tensor has no dimensions" + ) + + len_shifts = len(shifts) + len_dims = len(dims) + if len_shifts != 1 or len_dims != 1: + if len_shifts == 0: + raise RuntimeError("`shifts` required") + # Takes care of the case when dims is not specified (default) + # By default, the tensor is flattened before shifting, after which the original shape is restored + if len_dims == 0 and len_shifts == 1: + return torch.roll(torch.flatten(a), shifts, 0).view(a.shape) + if len_shifts != len_dims: + raise RuntimeError( + f"shifts and dimensions must align. shifts: {len_shifts}, dims: {len_dims}" + ) + assert len_dims > 1 + tail_shifts = shifts[1:] + tail_dims = dims[1:] + first_dim_rolled = torch.roll(a, (shifts[0],), dims[0]) + return torch.roll(first_dim_rolled, tail_shifts, tail_dims) + + # This path is taken when only one dimension is rolled + # For example to get `first_dim_rolled` above + dim = dims[0] + size = a.shape[dim] + start = (size - shifts[0]) % size + idx = torch.arange(size, device=a.device) + return a.index_select(dim, torch.fmod(start + idx, size)) + + +@register_decomposition(aten.rot90) +@out_wrapper() +def rot90( + a: TensorLikeType, k: int = 1, dims: DimsSequenceType = (0, 1) +) -> TensorLikeType: + """Reference implementation of :func:`torch.rot90`.""" + if len(dims) != 2: + raise RuntimeError( + f"expected total rotation dims == 2, but got dims = {len(dims)}" + ) + if a.ndim < 2: + raise RuntimeError(f"expected total dims >= 2, but got total dims = {a.ndim}") + + # Do this after the initial checks to be compatible with the behavior in + # core. 
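+    # Illustrative note (added comment, not part of the original source): the
+    # k % 4 dispatch below follows torch.rot90 / numpy.rot90 semantics. For a
+    # 2x2 input [[1, 2], [3, 4]] with k=1 and dims=(0, 1), flipping dim 1 gives
+    # [[2, 1], [4, 3]] and transposing dims 0 and 1 yields [[2, 4], [1, 3]].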
+ dims = utils.canonicalize_dims(a.ndim, dims) + + if dims[0] == dims[1]: + raise RuntimeError( + f"expected rotation dims to be different, but got dim0 = {dims[0]} and dim1 = {dims[1]}" + ) + k = k % 4 # Rotation direction is from the second towards the first axis for k < 0 + if k == 1: + return torch.transpose(torch.flip(a, (dims[1],)), dims[0], dims[1]) + elif k == 2: + return torch.flip(a, dims) + elif k == 3: + return torch.transpose(torch.flip(a, (dims[0],)), dims[0], dims[1]) + else: + return clone(a, memory_format=torch.contiguous_format) + + +def _check_stack_inputs(tensors: TensorSequenceType) -> None: + entry_shape = tensors[0].shape + for i in range(1, len(tensors)): + assert tensors[i].shape == entry_shape, ( + f"stack expects each tensor to be equal size, but got {entry_shape} at entry 0" + f"and {tensors[i].shape} at entry {i}" + ) + + +@register_decomposition(aten.stack) +@out_wrapper() +def stack(tensors: TensorSequenceType, dim: int = 0) -> TensorLikeType: + assert len(tensors) > 0, "stack expects a non-empty TensorList" + wrapped_dim = utils.canonicalize_dim(tensors[0].ndim + 1, dim) + # Refs need sparse support to check other condition + if wrapped_dim < tensors[0].ndim: # and not tensors[0].is_sparse: + _check_stack_inputs(tensors) + result_sizes = list(tensors[0].shape) + result_sizes.insert(wrapped_dim, len(tensors)) + out = torch.cat(tensors, wrapped_dim) + return out.view(result_sizes) + + # If dim == tensors[0].ndim, view cannot efficiently handle it + return torch.cat([t.unsqueeze(wrapped_dim) for t in tensors], dim) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def softmax( + a: TensorLikeType, + dim: int, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + result_dtype = dtype or a.dtype + computation_dtype = utils.get_computation_dtype(result_dtype) + a_ = _maybe_convert_to_dtype(a, computation_dtype) + if a.numel() == 0: + a_exp = exp(a_) + else: + a_max = amax(a_, dim, keepdim=True) + a_exp = exp(a_ - a_max) + return _maybe_convert_to_dtype( + true_divide(a_exp, sum(a_exp, dim, keepdim=True)), result_dtype + ) # type: ignore[return-value] + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def hstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "hstack expects a non-empty TensorList") + aligned_tensors = atleast_1d(*tensors) + if aligned_tensors[0].ndim == 1: + return cat(aligned_tensors, 0) + return cat(aligned_tensors, 1) + + +# CompositeImplicitAutograd - don't register decomp +@out_wrapper() +def vstack(tensors: TensorSequenceType) -> TensorLikeType: + torch._check(len(tensors) > 0, lambda: "vstack expects a non-empty TensorList") + aligned_tensors = atleast_2d(*tensors) + return cat(aligned_tensors, 0) + + +# CompositeImplicitAutograd - don't register decomp +def unflatten(a: TensorLikeType, dim: int, sizes: ShapeType) -> TensorLikeType: + dim = utils.canonicalize_dim(a.ndim, dim) + torch._check(len(sizes) != 0, lambda: "unflatten: sizes must be non-empty") + return a.view(tuple(a.shape[:dim]) + tuple(sizes) + tuple(a.shape[dim + 1 :])) + + +@register_decomposition(aten.unbind) +def unbind(t: TensorLikeType, dim: int = 0) -> TensorSequenceType: + dim = utils.canonicalize_dim(t.ndim, dim) + torch._check_index( + len(t.shape) > 0, + lambda: "Dimension specified as 0 but tensor has no dimensions", + ) + if t.shape[dim] == 0: + return tuple() + else: + return tuple( + torch.squeeze(s, dim) for s in torch.tensor_split(t, t.shape[dim], dim) + ) + + 
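+# A minimal usage sketch for the `unbind` reference above (illustrative comment,
+# not part of the original source; the example values are assumptions):
+#
+#   t = torch.arange(6).reshape(2, 3)
+#   unbind(t, dim=0)  # tensor_split into t.shape[0] == 2 slices along dim 0,
+#                     # each squeezed to 1D: (tensor([0, 1, 2]), tensor([3, 4, 5]))
+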
+@out_wrapper() +def index_copy(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + return x.clone(memory_format=torch.contiguous_format).index_copy_( + dim, index, tensor + ) + + +def index_copy_(x: TensorLike, dim: int, index: TensorLike, tensor: TensorLike): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + # Treat scalars as elements of \R^1 + y = x.unsqueeze(0) if x.ndim == 0 else x + idx = (slice(None),) * dim + (index,) + y[idx] = tensor + return x + + +@register_decomposition(aten.index_fill) +@out_wrapper() +def index_fill( + x: TensorLike, dim: int, index: TensorLike, value: Union[NumberType, TensorLike] +): + return _index_fill(x, dim, index, value, inplace=False) + + +@register_decomposition(aten.index_fill_) +def index_fill_( + x: TensorLike, dim: int, index: TensorLike, value: Union[NumberType, TensorLike] +): + return _index_fill(x, dim, index, value, inplace=True) + + +def _index_fill( + x: TensorLike, + dim: int, + index: TensorLike, + value: Union[NumberType, TensorLike], + *, + inplace: bool, +): + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + if isinstance(value, TensorLike): + torch._check( + value.ndim == 0, + lambda: "Only supports 0-dimensional value tensor. " # type: ignore[union-attr] + f"Got a tensor with {value.ndim} dimensions.", + ) # type: ignore[arg-type] + else: + value = torch.scalar_tensor( + value, dtype=x.dtype, layout=x.layout, device=x.device # type: ignore[arg-type] + ) + + # index_copy has some unnecessary preconditions when x is a scalar. We do this to work through them + zero_dim = x.ndim == 0 + y = x.unsqueeze(0) if zero_dim else x + # index_copy does not broadcast on value so we have to do it manually + shape = list(y.shape) + shape[dim] = index.numel() + value = value.expand(shape) + index_copy = Tensor.index_copy_ if inplace else torch.index_copy + out = index_copy(y, dim, index, value) # type: ignore[operator] + if inplace: + return x + else: + if zero_dim: + # The clone is necessary so that it returns a fresh tensor rather than a view + out = out.squeeze(0).clone() + # index_fill preserves the strides. 
index_copy always returns contiguous tensors + if out.stride() != x.stride(): + new_out = torch.empty_like(x) + new_out.copy_(out) + out = new_out + return out + + +@out_wrapper() +def index_add( + x: TensorLike, + dim: int, + index: TensorLike, + tensor: TensorLike, + *, + alpha: NumberType = 1, +): + # index_add always returns a new contiguous tensor + return x.clone(memory_format=torch.contiguous_format).index_add_( + dim, index, tensor, alpha=alpha # type: ignore[arg-type] + ) + + +@register_decomposition(aten.index_select) +@out_wrapper() +def index_select(x: TensorLike, dim: int, index: TensorLike): + dim = utils.canonicalize_dims(x.ndim, dim) + torch._check( + index.ndim <= 1, + lambda: f"Index should have dimension 1 or 0 (got {index.ndim})", + ) + if index.ndim == 0: + index = index.unsqueeze(0) + if x.ndim == 0: + # Treat scalars as elements of \R^1 + # We cannot use x[idx] here as it accesses item() (??), hence this awkward construction + return torch.empty_like(x).index_copy(0, index, x.expand_as(index)) + + idx = (slice(None),) * dim + (index,) + return x[idx] + + +@register_decomposition(aten.squeeze.dims) +def squeeze(a: TensorLikeType, dim: Optional[DimsType] = None) -> TensorLikeType: + from torch.fx.experimental.symbolic_shapes import guard_size_oblivious + + if dim is None: + dims = tuple(idx for idx, size in enumerate(a.shape) if size == 1) + return prims.squeeze(a, dims) if dims else prims.view_of(a) + + ndim = a.ndim + dim = utils.canonicalize_dims(ndim, dim) + dims = (dim,) if isinstance(dim, Dim) else dim + # Short-circuits if the tensor has no dimensions + if ndim == 0: + assert len(dims) == 0 or dims == (0,) + return prims.view_of(a) + + # Note: squeeze does not modify tensors when the given dim is not a dimension of length 1 + dims = tuple(d for d in dims if guard_size_oblivious(a.shape[d] == 1)) + if len(dims) == 0: + return prims.view_of(a) + if len(dims) == 1: + return prims.squeeze(a, dims) + dims_list = list(dims) + dims_list = sorted(dims_list, reverse=True) + for i in dims_list: + a = squeeze(a, i) + return a + + +# Note: does not work with TensorMetas because of data-dependent control-flow +# CompositeImplicitAutograd - don't register decomp +def tensor_split( + a: TensorLikeType, + indices_or_sections: Union[Tensor, DimsType], + dim: int = 0, +) -> Tuple[TensorLikeType, ...]: + _dim = utils.canonicalize_dim(a.ndim, dim) + if a.ndim == 0: + msg = "tensor_split: received a rank zero tensor, but expected a tensor of rank one or greater!" 
+        raise ValueError(msg)
+
+    # If indices_or_sections is a tensor, it must be a CPU Long tensor
+    if isinstance(indices_or_sections, TensorLike):
+        if not indices_or_sections.device.type == "cpu":
+            msg = "tensor_split: if indices_or_sections is a tensor it must be on the CPU, but received one on {}".format(
+                indices_or_sections.device
+            )
+            raise ValueError(msg)
+        if indices_or_sections.dtype != torch.long:
+            msg = (
+                "tensor_split: if indices_or_sections is a tensor it must have long dtype, "
+                f" but received one with dtype {indices_or_sections.dtype}"
+            )
+            raise ValueError(msg)
+
+    # Case 0 -- indices_or_sections is an integer or a scalar tensor n and a is split along dim into n parts of equal-ish length
+    if isinstance(indices_or_sections, IntLike) or (
+        isinstance(indices_or_sections, TensorLike) and indices_or_sections.ndim == 0
+    ):
+        sections: int = (
+            indices_or_sections  # type: ignore[assignment]
+            if isinstance(indices_or_sections, Number)
+            else indices_or_sections.item()
+        )
+
+        if sections <= 0:
+            msg = f"tensor_split: number of sections must be greater than 0, but was {sections}"
+            raise ValueError(msg)
+
+        splits = []
+        dim_size = a.shape[_dim]
+        min_split_size = math.floor(dim_size / sections)
+        num_splits_one_extra = dim_size % sections
+        start_idx = 0
+        for split_idx in range(sections):
+            split_size = (
+                min_split_size + 1
+                if (split_idx < num_splits_one_extra)
+                else min_split_size
+            )
+            s = prims.slice_in_dim(a, start_idx, start_idx + split_size, axis=_dim)
+            splits.append(s)
+            start_idx = start_idx + split_size
+
+        return tuple(splits)
+    # Case 1 -- indices_or_sections is a sequence of integers or a 1D tensor describing the splits
+    else:
+        indices = indices_or_sections
+        if isinstance(indices_or_sections, TensorLike):
+            if indices_or_sections.ndim != 1:
+                msg = (
+                    "tensor_split: non-scalar indices_or_sections tensors must have only one dimension, "
+                    f"but received a tensor with {indices_or_sections.ndim} dimensions"
+                )
+                raise ValueError(msg)
+
+            indices = indices_or_sections.tolist()
+
+        splits = []
+        start_idx = 0
+        for x in indices:
+            splits.append(prims.slice_in_dim(a, start_idx, x, axis=_dim))
+            start_idx = x
+        splits.append(prims.slice_in_dim(a, start_idx, a.shape[_dim], axis=_dim))
+        return tuple(splits)
+
+
+# CompositeImplicitAutograd - don't register decomp
+def hsplit(
+    a: TensorLikeType, indices_or_sections: DimsType
+) -> Tuple[TensorLikeType, ...]:
+    torch._check(
+        a.ndim >= 1,
+        lambda: (
+            "torch.hsplit requires a tensor with at least 1 dimension, but got a tensor with "
+            + str(a.ndim)
+            + " dimensions!"
+        ),
+    )
+    dim = 0 if a.ndim == 1 else 1
+    if isinstance(indices_or_sections, IntLike):
+        split_size = indices_or_sections
+        torch._check(
+            (split_size != 0 and a.shape[dim] % split_size == 0),
+            lambda: (
+                "torch.hsplit attempted to split along dimension "
+                + str(dim)
+                + ", but the size of the dimension "
+                + str(a.shape[dim])
+                + " is not divisible by the split_size "
+                + str(split_size)
+                + "!"
+            ),
+        )
+        return tensor_split(a, split_size, dim)
+
+    torch._check_type(
+        isinstance(indices_or_sections, (list, tuple)),
+        lambda: (
+            "hsplit(): received an invalid combination of arguments. 
" + "Expected indices_or_sections to be of type int, list of ints or tuple of ints " + f"but got type {type(indices_or_sections)}" + ), + ) + + split_sizes = indices_or_sections + return tensor_split(a, split_sizes, dim) + + +# CompositeImplicitAutograd - don't register decomp +def vsplit( + a: TensorLikeType, indices_or_sections: DimsType +) -> Tuple[TensorLikeType, ...]: + torch._check( + a.ndim >= 2, + lambda: ( + "torch.vsplit requires a tensor with at least 2 dimension, but got a tensor with " + + str(a.ndim) + + " dimensions!" + ), + ) + if isinstance(indices_or_sections, IntLike): + split_size = indices_or_sections + torch._check( + (split_size != 0 and a.shape[0] % split_size == 0), + lambda: ( + f"torch.vsplit attempted to split along dimension 0" + f", but the size of the dimension " + f"{a.shape[0]}" + f" is not divisible by the split_size " + f"{split_size}" + f"!" + ), + ) + return tensor_split(a, split_size, 0) + + torch._check_type( + isinstance(indices_or_sections, (list, tuple)), + lambda: ( + "vsplit(): received an invalid combination of arguments. " + "Expected indices_or_sections to be of type int, list of ints or tuple of ints " + f"but got type {type(indices_or_sections)}" + ), + ) + + split_sizes = indices_or_sections + return tensor_split(a, split_sizes, 0) + + +@register_decomposition(aten.diag.out) +@out_wrapper() +def diag( + self: TensorLikeType, + offset: int = 0, +) -> TensorLikeType: + ndim = self.dim() + torch._check( + ndim in (1, 2), lambda: f"diag(): Supports 1D or 2D tensors. Got {ndim}D" + ) + if ndim == 1: + return torch.diag_embed(self, offset) + else: + return torch.diagonal_copy(self, offset) + + +@register_decomposition(aten.diagonal_scatter) +@out_wrapper() +def diagonal_scatter( + input: TensorLikeType, + src: TensorLikeType, + offset: int = 0, + dim1: int = 0, + dim2: int = 1, +) -> TensorLikeType: + out = utils.clone_preserve_strides(input) + diag = out.diagonal(offset, dim1, dim2) + torch._check( + diag.shape == src.shape, + lambda: "expected src to have a size equal to the diagonal of the input." 
+ f"Got {src.shape} for a diagonal of shape {diag.shape}", + ) + copy_to(diag, src) + return out + + +@register_decomposition(aten.diagonal) +def diagonal( + self: TensorLikeType, + offset: int = 0, + dim1: int = 0, + dim2: int = 1, +) -> TensorLikeType: + """ + Reference implementation of torch.diagonal + """ + num_dims = self.dim() + dim1 = utils.canonicalize_dim(idx=dim1, rank=num_dims) + dim2 = utils.canonicalize_dim(idx=dim2, rank=num_dims) + + torch._check( + dim1 != dim2, lambda: f"diagonal dimensions cannot be identical {dim1}, {dim2}" + ) + + storage_offset = self.storage_offset() + + if offset >= 0: + diag_size = max(min(self.size()[dim1], self.size()[dim2] - offset), 0) + else: + diag_size = max(min(self.size()[dim1] + offset, self.size()[dim2]), 0) + + if diag_size > 0: + if offset >= 0: + storage_offset += offset * self.stride()[dim2] + else: + storage_offset -= offset * self.stride()[dim1] + + sizes = [s for i, s in enumerate(self.size()) if i not in (dim1, dim2)] + sizes.append(diag_size) + + strides = [s for i, s in enumerate(self.stride()) if i not in (dim1, dim2)] + strides.append(self.stride()[dim1] + self.stride()[dim2]) + + result = self.as_strided(size=sizes, stride=strides, storage_offset=storage_offset) + + return result + + +diagonal_copy = _make_copy_from_view(diagonal) + + +@register_decomposition(aten.diag_embed) +@out_wrapper() +def diag_embed( + t: TensorLikeType, + offset: int = 0, + dim1: int = -2, + dim2: int = -1, +) -> TensorLikeType: + """ + Reference implementation of torch.diag_embed + """ + # convert from negative dims + rank = t.ndim + 1 + dim1 = utils.canonicalize_dim(rank=rank, idx=dim1) + dim2 = utils.canonicalize_dim(rank=rank, idx=dim2) + + # as per the docs, exchanging dims is equivalent to changing the sign of + # offset + if dim1 > dim2: + dim1, dim2 = dim2, dim1 + offset = -offset + + torch._check( + dim1 != dim2, lambda: f"diagonal dimensions cannot be identical {dim1}, {dim2}" + ) + + # as per the docs, the size of last dim is placed at dim1 and dim2 + last_dim = t.size(-1) + + if offset != 0: + # add padding to match the new size + t_shape = list(t.shape) + t_shape[-1] = builtins.abs(offset) + z = torch.zeros(t_shape, dtype=t.dtype, device=t.device, requires_grad=False) + pair = (z, t) if offset > 0 else (t, z) + t = torch.cat(pair, dim=-1) + # make sure the diagonal always has the same size + last_dim += builtins.abs(offset) + + # preserve original data, but place 1 at dim1 and move last dim to dim2 + t = t.unsqueeze(dim1).movedim(-1, dim2) + + # generate ranges shifting indices based on offset + a_range = torch.arange(last_dim, device=t.device, dtype=torch.int64) + b_range = torch.arange( + offset, last_dim + offset, device=t.device, dtype=torch.int64 + ) + + # broadcast + cond = a_range == b_range.unsqueeze(-1) + cond_shape = [last_dim if i in (dim1, dim2) else 1 for i in range(len(t.shape))] + cond = cond.reshape(cond_shape) + + # aten.diag_embed always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return utils.mask_tensor(cond, t).contiguous() + + +@register_decomposition(aten.block_diag) +@out_wrapper() +def _block_diag_iterable(tensors: List[TensorLikeType]) -> TensorLikeType: + """ + Reference implementation of torch.block_diag + """ + tensors_2d = [ + tensor.view(1, -1) if tensor.dim() <= 1 else tensor for tensor in tensors + ] + + ncols = builtins.sum(tensor.shape[1] for tensor in tensors_2d) + device = tensors_2d[0].device + + result = [] + + col_start = 0 + for i, tensor in 
enumerate(tensors_2d): + torch._check( + tensor.dim() == 2, + lambda: "Input tensors must have 2 or fewer dimensions. " + f"Input {i} has {tensor.dim()} dimensions", + ) + torch._check( + tensor.device == device, + lambda: "Input tensors must all be on the same device. " + f"Input 0 is on device {device} and input {i} is on device {tensor.device}.", + ) + row, col = tensor.shape + left = torch.zeros((row, col_start), device=device, dtype=tensor.dtype) + right = torch.zeros( + (row, ncols - col_start - col), device=device, dtype=tensor.dtype + ) + result += [torch.cat((left, tensor, right), dim=1)] + col_start += col + + return torch.cat(result, dim=0) + + +def block_diag(*tensors: List[TensorLikeType]) -> TensorLikeType: + """ + This is used as an input to PythonRefInfo. `torch.block_diag` + expects arguments splatted, but `aten.block_diag` expects only + one argument that is a list of Tensors. + """ + return _block_diag_iterable(tensors) + + +# CompositeImplicitAutograd - don't register decomp +def dsplit(a: TensorLikeType, sections: DimsType) -> TensorSequenceType: + if a.ndim < 3: + raise RuntimeError( + f"torch.dsplit requires a tensor with at least 3 dimension, but got a tensor with {a.ndim} dimensions!" + ) + if isinstance(sections, IntLike) and (sections == 0 or a.shape[2] % sections != 0): + raise RuntimeError( + "torch.dsplit attempted to split along dimension 2, " + + f"but the size of the dimension {a.shape[2]} is not divisible by the split_size {sections}!" + ) + return tensor_split(a, sections, 2) + + +@register_decomposition(aten.t.default) +def t(a: TensorLikeType): + # TODO: Add sparse support + # if a.is_sparse: + # sparse_dim = a.sparse_dim() + # dense_dim = a.dense_dim() + # if not (sparse_dim <= 2 and dense_dim == 0): + # raise RuntimeError( + # f"t() expects a tensor with <= 2 sparse and 0 dense dimensions, but got {sparse_dim} sparse and" + # f"{dense_dim} dense dimensions" + # ) + if a.ndim > 2: + raise RuntimeError( + f"t() expects a tensor with <= 2 dimensions, but self is {a.ndim}D" + ) + return torch.transpose(a, 0, 0 if a.ndim < 2 else 1) + + +# CompositeImplicitAutograd - don't register decomp +def T(a: TensorLikeType) -> TensorLikeType: + # n != 2 && n != 0 is deprecated in regular PyTorch. + torch._check( + a.ndim in (0, 2), + lambda: ( + "The use of `x.T` on tensors of dimension other than 0 or 2 " + "to reverse their shape is not supported." 
+ ), + ) + return a.t() + + +@register_decomposition(aten.alias) +def alias(a: TensorLikeType) -> TensorLikeType: + return prims.view_of(a) + + +@register_decomposition(aten.transpose) +def transpose(a: TensorLikeType, dim0: int, dim1: int) -> TensorLikeType: + _dim0, _dim1 = utils.canonicalize_dims(a.ndim, (dim0, dim1)) # type: ignore[misc] + + if a.ndim <= 1 or dim0 == dim1: + return aten.alias.default(a) + + _permutation = list(range(0, a.ndim)) + _permutation[_dim0] = _dim1 + _permutation[_dim1] = _dim0 + return torch.permute(a, _permutation) + + +# Aliases for transpose +swap_axes = transpose + + +@register_decomposition(aten.unfold) +def unfold( + self: TensorLikeType, dimension: int, size: int, step: int +) -> TensorLikeType: + shape, strides = _get_unfold_shape_stride( + self.shape, self.stride(), dimension, size, step + ) + return self.as_strided(shape, strides) + + +@register_decomposition(aten.unfold_copy) +@out_wrapper() +def unfold_copy(self: TensorLikeType, dimension: int, size: int, step: int): + return self.unfold(dimension, size, step).clone( + memory_format=torch.contiguous_format + ) + + +def _cumsumprod_common( + func, + init, + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + # We implement all the kwargs of a reduction. ATen just handles dtype + # nb. This decomposition may not be as efficient as a backend-specific implementation + ndim = a.ndim + dim = utils.canonicalize_dim(ndim, dim) + if ndim == 0: + return func(a.unsqueeze(0), dim=0, dtype=dtype, out=out) + a = a.unsqueeze(dim + 1) + rg = torch.arange(a.shape[dim], device=a.device) + mask = rg.unsqueeze(1) <= rg + for _ in range(ndim - dim - 1): + mask = mask.unsqueeze(-1) + masked_a = torch.where(mask, a, init) + return func(masked_a, dim=dim, dtype=dtype, out=out) + + +@register_decomposition(aten.cumsum) +def cumsum( + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + return _cumsumprod_common(func=sum, init=0, a=a, dim=dim, dtype=dtype, out=out) + + +@register_decomposition(aten.cumprod) +def cumprod( + a: TensorLikeType, + dim: int, + *, + dtype: Optional[torch.dtype] = None, + out: Optional[Tensor] = None, +) -> TensorLikeType: + return _cumsumprod_common(func=prod, init=1, a=a, dim=dim, dtype=dtype, out=out) + + +# Note: although squeeze is documented as having the out= kwarg it doesn't +@register_decomposition(aten.unsqueeze) +def unsqueeze(a: TensorLikeType, dim: int) -> TensorLikeType: + # Note that unsqueeze canonicalizes with rank + 1 because it allows + # a new innermost dimension to be specified + ndim = a.ndim + 1 + dim = utils.canonicalize_dim(ndim, dim) + return prims.expand_dims(a, (dim,), ndim=ndim) + + +# NOTE: shape is a vararg because Tensor.reshape can be called with as +# Tensor.view(a, b, c) or Tensor.view((a, b, c)) Function call torch.view +# doesn't support unpacked shapes +# TODO: Turn this into a decomposition (currently fails on reshape meta tests) +@register_decomposition(aten.view.default) +def view(a: TensorLikeType, *shape: ShapeType) -> TensorLikeType: + return _reshape_view_helper(a, *shape, allow_copy=False) + + +# CompositeImplicitAutograd - don't register decomp +def view_as(self: TensorLikeType, other: TensorLikeType) -> TensorLikeType: + return self.view(other.size()) + + +# CompositeImplicitAutograd - don't register decomp +def ravel(a: TensorLikeType) -> TensorLikeType: + return reshape(a, (-1,)) + + +# 
CompositeImplicitAutograd - don't register decomp +# missing ref impl. for aten.gather +@out_wrapper() +def take_along_dim( + a: torch.Tensor, indices: torch.Tensor, dim: Optional[int] = None +) -> torch.Tensor: + torch._check( + a.ndim == indices.ndim, + lambda: ( + "torch.take_along_dim(): input and indices should have the same " + f"number of dimensions, but got {a.ndim} dimensions for input, and " + f"{indices.ndim} dimensions for indices" + ), + ) + + torch._check( + utils.is_integer_dtype(indices.dtype), + lambda: ( + "torch.take_along_dim(): dtype of indices should be int but got " + f"{indices.dtype} instead" + ), + ) + + if dim is None: + return torch.gather(a.view(-1), 0, indices.view(-1)) + else: + self_sizes = list(a.shape) + self_sizes[dim] = indices.size(dim) + broadcast_shape = utils.infer_size_shapes(self_sizes, indices.size()) + indices_broadcast = broadcast_to(indices, broadcast_shape) + + indices_sizes = list(indices.shape) + indices_sizes[dim] = a.size(dim) + broadcast_shape = utils.infer_size_shapes(indices_sizes, a.size()) + self_broadcast = broadcast_to(a, broadcast_shape) + + return torch.gather(self_broadcast, dim, indices_broadcast) + + +@out_wrapper() +def empty( + *shape, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + requires_grad: bool = False, + pin_memory: bool = False, + memory_format: torch.memory_format = torch.contiguous_format, +) -> TensorLikeType: + torch._check( + memory_format != torch.preserve_format, + lambda: "torch.empty: the Preserve memory format is not supported", + ) + + shape = utils.extract_shape_from_varargs(shape) + + if memory_format == torch.contiguous_format: + strides = utils.make_contiguous_strides_for(shape) + elif memory_format == torch.channels_last_3d: + strides = utils.make_channels_last_3d_strides_for(shape) + else: # memory_format == torch.channels_last + torch._check( + memory_format == torch.channels_last, + lambda: f"torch.empty: received an unknown memory format {memory_format}!", + ) + strides = utils.make_channels_last_2d_strides_for(shape) + + return torch.empty_strided( + shape, + strides, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@out_wrapper() +def empty_permuted( + shape, + physical_layout, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + return prims.empty_permuted( + shape, + physical_layout, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_empty) +@out_wrapper() +def new_empty( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.empty( + size, + dtype=dtype, + device=device, + pin_memory=pin_memory, + layout=layout, + ) + + +@register_decomposition(aten.new_empty_strided) +@out_wrapper() +def new_empty_strided( + a: TensorLikeType, + size: ShapeType, + stride: StrideType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> 
TensorLikeType: + """ + Reference implementation of torch.Tensor.new_empty_strided + """ + + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.empty_strided( + size, + stride, + dtype=dtype, + device=device, + pin_memory=pin_memory, + layout=layout, + ) + + +@register_decomposition(aten.zeros.default) +@out_wrapper() +def zeros( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + size = utils.extract_shape_from_varargs(size) + + if dtype is None: + dtype = torch.get_default_dtype() + + return torch.full( + size, + False if dtype == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_zeros) +@out_wrapper() +def new_zeros( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + False if (dtype or a.dtype) == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.ones.default) +@out_wrapper() +def ones( + *size, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + size = utils.extract_shape_from_varargs(size) + + if dtype is None: + dtype = torch.get_default_dtype() + + return torch.full( + size, + True if dtype == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_ones) +@out_wrapper() +def new_ones( + a: TensorLikeType, + size: ShapeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + True if (dtype or a.dtype) == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.new_full) +@out_wrapper() +def new_full( + a: TensorLikeType, + size: ShapeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + return torch.full( + size, + fill_value, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + ) + + +@register_decomposition(aten.empty_like) +@out_wrapper() +def empty_like( + a: TensorLikeType, + *, + dtype: 
Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: Optional[torch.layout] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + dtype = a.dtype if dtype is None else dtype + layout = a.layout if layout is None else layout + device = a.device if device is None else device + + if memory_format != torch.preserve_format: + return torch.empty( + a.shape, + dtype=dtype, + layout=layout, + device=device, + requires_grad=requires_grad, + pin_memory=pin_memory, + memory_format=memory_format, + ) + + # memory_format == torch.preserve_format + logical_to_physical_perm = ( + utils.compute_elementwise_output_logical_to_physical_perm(a) + ) + # identity perm is [2, 1, 0] + return torch.empty_permuted( + a.shape, + logical_to_physical_perm, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + + +@register_decomposition([aten.arange.start_step, aten.arange.start_out]) +@out_wrapper() +def arange( + start: NumberType = 0, + end: Optional[NumberType] = None, + step: NumberType = 1, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + device = torch.device(utils.device_or_default(device)) + + assert not isinstance(start, complex) + assert not isinstance(end, complex) + assert not isinstance(step, complex) + + # Case: torch.arange(5) + if end is None: + end = start + start = 0 + torch._check(step != 0, lambda: "step must be nonzero") + if step > 0: + torch._check( + end >= start, + lambda: "upper bound and lower bound inconsistent with step sign", + ) + elif step < 0: + torch._check( + end <= start, + lambda: "upper bound and lower bound inconsistent with step sign", + ) + + def is_finite(x): + return not isinstance(x, FloatWithoutSymFloat) or math.isfinite(x) + + torch._check( + is_finite(start) and is_finite(end), + lambda: f"unsupported range: {start} -> {end}", + ) + torch._check( + is_finite(step), + lambda: f"step must be finite but got {step}", + ) + + if dtype is None: + args = (start, end, step) + integer_args = builtins.all(isinstance(arg, IntLike) for arg in args) + dtype = torch.int64 if integer_args else torch.get_default_dtype() + + is_integer = utils.is_integer_dtype(dtype) + if is_integer: + xstart = sym_int(start) + xend = sym_int(end) + xstep = sym_int(step) + + # For int64 we truncate arguments to int before calculating length, but + # other integral dtypes we don't. Weird... but needed to match ATen shapes. + if dtype == torch.int64: + # Uses floordiv to avoid ceil in inductor. 
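+            # Worked example (illustrative, not part of the original source):
+            # start=0, end=5, step=2 gives sgn=1 and
+            # length = (5 - 0 + 2 - 1) // 2 == 3, matching torch.arange(0, 5, 2)
+            # which produces [0, 2, 4].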
+ sgn = bool(xstep > 0) - bool(xstep < 0) # type: ignore[possibly-undefined] + length = (xend - xstart + xstep - sgn) // xstep # type: ignore[possibly-undefined] + else: + length = math.ceil((end - start) / step) + + if is_integer: + return prims.iota( + length, + start=xstart, # type: ignore[possibly-undefined] + step=xstep, # type: ignore[possibly-undefined] + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + computation_dtype = utils.get_acc_type(dtype, device) + index = prims.iota( + length, + start=0, + step=1, + dtype=torch.int64, + device=device, + requires_grad=False, + ) + index = _maybe_convert_to_dtype(index, computation_dtype) + result = start + step * index + result = _maybe_convert_to_dtype(result, dtype) + + if requires_grad: + result.requires_grad_(True) + return result + + +@register_decomposition(aten.lerp) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("start", "end", "weight"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def lerp(start: Tensor, end: Tensor, weight: Union[Tensor, NumberType]): + inputs = [start, end] + if isinstance(weight, Number): + weight = start.new_full((), weight) # type: ignore[arg-type] + else: + inputs.append(weight) + assert isinstance(weight, Tensor) # mypy + # We implement it this way for numerical stability. We assume (in the stability optimisation) + # that 0 <= weight <= 1. We take the abs to deal with complex numbers + # We want to perform operations near zero, which is where floating points are most precise + # thus, we perform the following optimisation: + # If weight.abs() >= 0.5: + # return (1 - weight) * (start - end) + end + mask = weight.abs() >= 0.5 + coeff = torch.where(mask, weight - 1, weight) + base = torch.where(mask, end, start) + output = coeff * (end - start) + base + # make sure the decomposition output's stride is same as non-decomposition path. 
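+    # Clarifying note (added comment; a reading of the intent above, not part of
+    # the original source): torch.where produces a fresh elementwise result, so
+    # its strides may not match what eager lerp returns for non-contiguous
+    # inputs; prims.copy_strided below realigns them when they differ.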
+ stride = utils.compute_elementwise_output_strides(*_maybe_broadcast(*inputs)) + if output.stride() != stride: + output = prims.copy_strided(output, stride) + + return handle_noncontiguous_outputs(inputs, output) + + +@register_decomposition(aten.linspace) +@out_wrapper() +def linspace( + start: Union[NumberType, TensorLikeType], + end: Union[NumberType, TensorLikeType], + steps: NumberType, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + if isinstance(start, TensorLikeType): + torch._check( + start.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + start = _maybe_convert_to_dtype(start, torch.float64) + if isinstance(end, TensorLikeType): + torch._check( + end.dim() == 0, + lambda: "linspace only supports 0-dimensional start and end tensors", + ) + end = _maybe_convert_to_dtype(end, torch.float64) + + if py_any(isinstance(arg, complex) for arg in (start, end, steps)): + default_complex_dtype = utils.corresponding_complex_dtype( + torch.get_default_dtype() + ) + if dtype is None: + dtype = default_complex_dtype + else: + torch._check( + utils.is_complex_dtype(dtype), + lambda: f"linspace(): inferred dtype {default_complex_dtype} can't be safely cast to passed dtype {dtype}", + ) + else: + dtype = dtype or torch.get_default_dtype() + assert isinstance(dtype, torch.dtype) + + # steps does not participate in the computation of the dtype + torch._check_type( + isinstance(steps, IntLike), + lambda: f"received an invalid combination of arguments - got \ +({type(start).__name__}, {type(end).__name__}, {type(steps).__name__})", + ) + assert isinstance(steps, IntLike) # for mypy + torch._check(steps >= 0, lambda: "number of steps must be non-negative") + + factory_kwargs = { + "layout": layout, + "device": device, + "pin_memory": pin_memory, + "requires_grad": requires_grad, + } + if steps == 0: + return torch.full((0,), 0, dtype=dtype, **factory_kwargs) # type: ignore[arg-type] + if steps == 1: + if isinstance(start, TensorLikeType): + return torch.empty((steps,), dtype=dtype, **factory_kwargs).copy_(start) # type: ignore[arg-type] + else: + return torch.full((steps,), start, dtype=dtype, **factory_kwargs) # type: ignore[arg-type] + + # Perform in arange in int because some backends like ATen or Triton do not support all the dtypes + rg = torch.arange(0, steps, **factory_kwargs) # type: ignore[arg-type] + + # Small types need to be computed in higher precision as this is, at heart, an associative scan + dtype_red = ( + torch.int64 + if (utils.is_boolean_dtype(dtype) or utils.is_integer_dtype(dtype)) + else dtype + ) + computation_dtype, _ = utils.reduction_dtypes( + rg, REDUCTION_OUTPUT_TYPE_KIND.SAME, dtype_red + ) + cast_rg = partial(_maybe_convert_to_dtype, dtype=computation_dtype) + + # We implement torch.lerp without performing rg / (steps - 1) explicitly + # With this we get out[0] == start, out[-1] == end + step = (end - start) / (steps - 1) + out = torch.where( + rg < steps / 2, + start + step * cast_rg(rg), # type: ignore[arg-type,operator] + end - step * cast_rg((steps - 1) - rg), # type: ignore[arg-type,operator] + ) + return _maybe_convert_to_dtype(out, dtype) # type: ignore[return-value] + + +@register_decomposition(aten.logspace) +@out_wrapper() +def logspace( + start: Union[NumberType, TensorLikeType], + end: Union[NumberType, TensorLikeType], + steps: NumberType, + base: NumberType = 
10, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + if dtype is None: + dtype = torch.get_default_dtype() + + # NB: NumPy doesn't have this cast + if prims.utils.is_integer_dtype(dtype): + if isinstance(start, FloatLike): + start = sym_int(start) + elif isinstance(start, TensorLikeType): + torch._check( + start.dim() == 0, + lambda: "logspace only supports 0-dimensional start and end tensors", + ) + start = _maybe_convert_to_dtype(start, dtype) + if isinstance(end, FloatLike): + end = sym_int(end) + elif isinstance(end, TensorLikeType): + torch._check( + end.dim() == 0, + lambda: "logspace only supports 0-dimensional start and end tensors", + ) + end = _maybe_convert_to_dtype(end, dtype) + + if py_any(isinstance(arg, complex) for arg in (start, end, steps)): + default_complex_dtype = utils.corresponding_complex_dtype( + torch.get_default_dtype() + ) + dtype = default_complex_dtype + _dtype = None # torch.linspace will update the correct dtype + else: + _dtype = torch.float64 + + assert not isinstance(base, complex) # for mypy + if base < 0: + raise NotImplementedError + ret = torch.linspace( # type: ignore[misc] + start, # type: ignore[arg-type] + end, # type: ignore[arg-type] + steps, # type: ignore[arg-type] + dtype=_dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + return _maybe_convert_to_dtype(torch.pow(base, ret), dtype) # type: ignore[arg-type,return-value] + + +@overload +def meshgrid(tensors: Sequence[TensorLikeType], indexing: str): + pass + + +@overload +def meshgrid(*tensors: TensorLikeType, indexing: str): + pass + + +@register_decomposition(aten.meshgrid) +def meshgrid( + *tensors: Union[TensorLikeType, List[TensorLikeType], Tuple[TensorLikeType]], + indexing: str, +) -> List[TensorLikeType]: + # This ref simultaneously handles two overloads (see stubs above) + # The `indexing` argument is currently optional for torch.meshgrid, but we + # plan to make the argument required: https://github.com/pytorch/pytorch/issues/50276 + if isinstance(tensors[0], (list, tuple)): + assert len(tensors) == 1 + tensors = tuple(tensors[0]) + + torch._check( + py_all(isinstance(a, TensorLike) for a in tensors), + lambda: "meshgrid expects its inputs to be tensors", + ) + + torch._check(len(tensors) > 0, lambda: "meshgrid expects a non-empty TensorList") + + for i in range(len(tensors) - 1): + torch._check( + tensors[i].dtype == tensors[i + 1].dtype, # type: ignore[union-attr] + lambda: "meshgrid expects all tensors to have the same dtype", + ) + torch._check( + tensors[i].device == tensors[i + 1].device, # type: ignore[union-attr] + lambda: "meshgrid expects all tensors to have the same device", + ) + + swap_first_and_second_tensors = False + if indexing == "xy": + swap_first_and_second_tensors = len(tensors) >= 2 + if swap_first_and_second_tensors: + tensors = (tensors[1], tensors[0], *tensors[2:]) + else: + torch._check( + indexing == "ij", + lambda: ( + 'torch.meshgrid: indexing must be one of "xy" or "ij", ' + f"but received: {indexing}" + ), + ) + + result_shape: List[int] = [] + for t in tensors: + assert isinstance(t, TensorLike) # mypy + torch._check( + t.ndim == 0 or t.ndim == 1, + lambda: f"torch.meshgrid: Expected 0D or 1D tensor in the tensor list but got: {t}", + ) + result_shape.append(t.numel()) + + grids: List[TensorLikeType] = [] + for i, t in enumerate(tensors): + 
assert isinstance(t, TensorLike) # mypy + if t.ndim == 0: + t = t.view((1,)) + grids.append(prims.broadcast_in_dim(t, result_shape, (i,))) + + if swap_first_and_second_tensors: + # Swap outputs if we originally swapped at the beginning + grids[0], grids[1] = grids[1], grids[0] + + return grids + + +# CompositeImplicitAutograd - don't register decomp +def movedim( + input: TensorLikeType, + source: Union[int, DimsSequenceType], + destination: Union[int, DimsSequenceType], +) -> TensorLikeType: + """ + Reference implementation of torch.movedim + """ + if type(source) is int: + source = (source,) + if type(destination) is int: + destination = (destination,) + + # Converts to list to produce a compatible error message with core PyTorch, + # which prints sequences in square brackets. + torch._check( + len(source) == len(destination), # type: ignore[arg-type] + lambda: ( + "movedim: Invalid source or destination dims: source " # type: ignore[arg-type] + f"({list(source)} dims) should contain the same number " # type: ignore[arg-type] + f"of dims as destination ({list(destination)} dims)" # type: ignore[arg-type] + ), + ) + + rank = input.ndim + ss = tuple(utils.canonicalize_dims(rank=rank, indices=source)) # type: ignore[arg-type] + ds = tuple(utils.canonicalize_dims(rank=rank, indices=destination)) # type: ignore[arg-type] + + sss = set(ss) + dss = set(ds) + + # See above on why this converts to list in error messages. + torch._check( + len(ss) == len(sss), + lambda: f"movedim: repeated dim in `source` ({list(source)})", # type: ignore[arg-type] + ) + torch._check( + len(ds) == len(dss), + lambda: f"movedim: repeated dim in `destination` ({list(destination)})", # type: ignore[arg-type] + ) + + m = dict(zip(ds, ss)) + dims = [] + si = 0 # source index + for di in range(rank): + # check if the destination index is in the mapping + s = m.get(di) + if s is not None: + # insert source index if found + dims.append(s) + else: + # insert source index sequentially, skipping indices from the mapping + while si in sss: + si += 1 + dims.append(si) + si += 1 + + result = torch.permute(input, tuple(dims)) + + return result + + +# NOTE: for convenience, shape can be a tuple of ints or a tuple containing a tuple of ints +@register_decomposition(aten.empty_strided) +@out_wrapper() +def empty_strided( + shape: Union[ShapeType, Tuple[ShapeType]], + strides: StrideType, + *, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: torch.layout = torch.strided, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + # Layout == strided, pin_memory is False + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + + shape = utils.extract_shape_from_varargs(shape) + dtype = torch.get_default_dtype() if dtype is None else dtype + device = torch.device("cpu") if device is None else device + + return prims.empty_strided( + shape, + strides, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +@register_decomposition(aten.eye) +@out_wrapper() +def eye( + n: int, + m: Optional[int] = None, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, # TODO: unused +) -> TensorLikeType: + """ + Reference implementation of torch.eye + """ + if m is None: + m = n + + torch._check(n >= 0, lambda: f"n must be greater or equal to 0, got {n}") + torch._check(m >= 0, lambda: f"m must be greater or equal to 0, got 
{m}") + + range_n = torch.arange(n, dtype=torch.int64, device=device, requires_grad=False) + range_m = torch.arange(m, dtype=torch.int64, device=device, requires_grad=False) + + cond = range_n.unsqueeze(-1) == range_m + if dtype is torch.bool: + return cond + else: + one = torch.ones( + (1,), + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=False, + ) + return torch.where(cond, one, 0) + # TODO: Use requires_grad. All refs taking the requires_grad kwarg must + # return a leaf tensor. + # result.requires_grad_(requires_grad) + + +@register_decomposition([aten.full.default, aten.full.out]) +@out_wrapper() +def full( + shape: ShapeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + + dtype = dtype if dtype is not None else utils.type_to_dtype(type(fill_value)) + device = device if device is not None else torch.device("cpu") + + e = empty( + shape, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + ) + return torch.fill(e, fill_value) # type: ignore[arg-type] + + +def full_like( + a: TensorLikeType, + fill_value: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + e = torch.empty_like( + a, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + return fill(e, fill_value) + + +@register_decomposition(aten.zeros_like) +@out_wrapper() +def zeros_like( + a: TensorLikeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + return torch.full_like( + a, + False if (dtype or a.dtype) == torch.bool else 0, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + +@register_decomposition(aten.ones_like) +@out_wrapper() +def ones_like( + a: TensorLikeType, + *, + dtype: Optional[torch.dtype] = None, + layout: Optional[torch.layout] = None, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, + requires_grad: bool = False, + memory_format: torch.memory_format = torch.preserve_format, +) -> TensorLikeType: + return torch.full_like( + a, + True if (dtype or a.dtype) == torch.bool else 1, + dtype=dtype, + layout=layout, + device=device, + pin_memory=pin_memory, + requires_grad=requires_grad, + memory_format=memory_format, + ) + + +@register_decomposition(aten.randn.default) +@out_wrapper() +def randn( + *shape, + dtype: Optional[torch.dtype] = None, + device: Optional[DeviceLikeType] = None, + layout: Optional[torch.layout] = None, + requires_grad: bool = False, + pin_memory: bool = False, +) -> TensorLikeType: + utils.check_pin_memory(pin_memory) + + shape_ = utils.extract_shape_from_varargs(shape) + + dtype = utils.dtype_or_default(dtype) + device = utils.device_or_default(device) + + return prims.normal( + shape_, + 
mean=0.0, + std=1.0, + dtype=dtype, + device=device, + requires_grad=requires_grad, + ) + + +def scalar_tensor( + a: NumberType, + *, + dtype: Optional[torch.dtype] = None, + layout: torch.layout = torch.strided, + device: Optional[DeviceLikeType] = None, + pin_memory: bool = False, +) -> TensorLikeType: + utils.check_layout(layout) + utils.check_pin_memory(pin_memory) + dtype = dtype if dtype is not None else utils.type_to_dtype(type(a)) + device = device if device is not None else torch.device("cpu") + return prims.scalar_tensor(a, dtype=dtype, device=device) + + +# +# Randomness References +# + + +def _uniform_helper( + shape: ShapeType, + low: Union[bool, int, float] = 0.0, + high: Union[bool, int, float] = 1.0, + *, + dtype: torch.dtype, + device: DeviceLikeType, +) -> TensorLikeType: + utils.validate_shape(shape) + + assert isinstance(low, Number) + assert isinstance(high, Number) + low = sym_float(low) + high = sym_float(high) + + assert isinstance(dtype, torch.dtype) + device = utils.canonicalize_device(device) + + return prims._uniform_helper(shape, low=low, high=high, dtype=dtype, device=device) + + +@register_decomposition(aten.masked_fill) +@out_wrapper() +def masked_fill(a: TensorLikeType, mask: TensorLikeType, value: TensorOrNumberLikeType): + python_type = utils.dtype_to_type(a.dtype) + if isinstance(value, Number): + value_type = type(value) + else: + # NOTE: Could not use value = item(value) as it resulted in + # RuntimeError: Cannot cast FakeTensor(cpu) to number + value_ndim = value.ndim + torch._check( + value_ndim == 0, + lambda: f"only supports a 0-dimensional value tensor, but got tensor with {value_ndim} dimension", + ) + # `masked_fill` allows cpu scalar to be moved to cuda and xpu but not otherwise. + is_cpu_scalar = a.device.type in ["cuda", "xpu"] and value.device.type == "cpu" + torch._check( + is_cpu_scalar or value.device == a.device, + lambda: "Expected `value` to be on same device as `a`", + ) + value_type = utils.dtype_to_type(value.dtype) + + if value_type is complex: + # only downcasting from complex to lower type is not allowed. + # We allow casting `value` to lower type for other case + # Eg. float -> int. 
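+        # For example, filling an integer tensor with a Python float is allowed,
+        # while filling a float tensor with a complex value must raise.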
+ # Ref: https://github.com/pytorch/pytorch/issues/79195 + torch._check( + utils.is_weakly_lesser_type(value_type, python_type), + lambda: f"could not convert to type {python_type} without overflow", + ) + + # Since `where` allows type-promotion, + # cast value to correct type before passing to `where` + value = _maybe_convert_to_dtype(value, a.dtype) + r = torch.where(mask, value, a) # type: ignore[arg-type] + + # aten.mask_fill always return a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return r.contiguous() + + +@register_decomposition(aten.masked_fill_) +def masked_fill_( + a: TensorLikeType, mask: TensorLikeType, value: TensorOrNumberLikeType +) -> TensorLikeType: + b = torch.masked_fill(a, mask, value) # type: ignore[arg-type] + a.copy_(b) + return a + + +# CompositeImplicitAutograd - don't register decomp +def allclose( + a: TensorLikeType, + b: TensorLikeType, + rtol: float = 1e-05, + atol: float = 1e-08, + equal_nan: bool = False, +) -> bool: + """ + Reference implementation of torch.allclose + """ + _check_close_args(name="torch.allclose", a=a, b=b, rtol=rtol, atol=atol) + + return bool( + torch.all(torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)).item() + ) + + +def equal(a: TensorLikeType, b: TensorLikeType) -> bool: + utils.check_same_device(a, b, allow_cpu_scalar_tensors=False) + utils.check_same_dtype(a, b) + + # Shape check + if a.ndim != b.ndim: + return False + + for x, y in zip(a.shape, b.shape): + if x != y: + return False + + # Short-circuits if there are no elements to validate + if a.numel() == 0: + return True + + return item(all(eq(a, b))) # type: ignore[return-value] + + +@register_decomposition(aten.norm) +@out_wrapper(exact_dtype=True) +def norm( + input: TensorLikeType, + p: Optional[Union[float, str]] = "fro", + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # In these cases we compute the "Frobenius norm" + if ( + p == "fro" and (dim is None or isinstance(dim, Dim) or len(dim) <= 2) + ) or p is None: + p = 2 + if isinstance(dim, Dim): + dim = [dim] + if isinstance(p, str): + # Here we either call the nuclear norm, or we call matrix_norm with some arguments + # that will throw an error + if dim is None: + dim = tuple(range(input.ndim)) + return torch.linalg.matrix_norm(input, p, dim, keepdim, dtype=dtype) + else: + return torch.linalg.vector_norm(input, p, dim, keepdim, dtype=dtype) + + +@register_decomposition(aten.trace) +@out_wrapper() +def trace(self: TensorLikeType) -> TensorLikeType: + torch._check( + self.ndim == 2, lambda: "expected a matrix, but got tensor with dim {self.ndim}" + ) + return torch.sum(torch.diag(self, 0)) + + +def _make_r_binary_op(base_op): + def rop( + a: Union[TensorLikeType, NumberType], + b: Union[TensorLikeType, NumberType], + ) -> TensorLikeType: + return base_op(b, a) + + return rop + + +rtruediv = _make_r_binary_op(true_divide) +rfloordiv = _make_r_binary_op(floor_divide) +rpow = _make_r_binary_op(pow) + + +@register_decomposition(aten.triu) +@out_wrapper() +def triu(a: TensorLikeType, diagonal: int = 0) -> TensorLikeType: + torch._check( + a.ndim >= 2, lambda: "triu: input tensor must have at least 2 dimensions" + ) + h, w = a.shape[-2:] + mask = ( + torch.arange(w, device=a.device).unsqueeze(-2) + - torch.arange(h, device=a.device).unsqueeze(-1) + ) >= diagonal + + # aten.triu always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return 
utils.mask_tensor(mask, a).contiguous() + + +@register_decomposition(aten.tril) +@out_wrapper() +def tril(a: TensorLikeType, diagonal: int = 0) -> TensorLikeType: + torch._check( + a.ndim >= 2, lambda: "tril: input tensor must have at least 2 dimensions" + ) + h, w = a.shape[-2:] + mask = ( + torch.arange(w, device=a.device).unsqueeze(-2) + - torch.arange(h, device=a.device).unsqueeze(-1) + ) <= diagonal + + # aten.tril always returns a new contiguous tensor + # contiguous() is needed to correctly model the output stride + return utils.mask_tensor(mask, a).contiguous() + + +# This is based on get_tril_size in aten/src/ATen/native/TensorFactories.h +# The components of the matrix that belong to the lower triangle with offset +# form a pentagon that can be broken down into a top trapezoid and a bottom +# rectangle. For the implementation of tril_indices, we need the sizes of +# both of these, as well as the length of the top side of the trapezoid. +def _get_tril_sizes(row: int, col: int, offset: int) -> Tuple[int, int, int]: + if row == 0 or col == 0: + return 0, 0, 0 + + m_first_row = min(col, 1 + offset) if offset > 0 else int(row + offset > 0) + m_last_row = max(0, min(col, row + offset)) + n_row_all = max(0, min(row, row + offset)) + n_row_trapezoid = m_last_row - m_first_row + 1 + + # Number of elements in top trapezoid + trapezoid_size = (m_first_row + m_last_row) * n_row_trapezoid // 2 + # Number of elements in bottom rectangle + diff_row = n_row_all - n_row_trapezoid + rectangle_size = max(0, diff_row * col) + + return trapezoid_size, rectangle_size, m_first_row + + +def _trilu_checks( + name: str, + row: int, + col: int, + dtype: torch.dtype, + layout: torch.layout, + pin_memory: bool, +): + torch._check(row >= 0, lambda: f"row must be non-negative, got {row}") + torch._check(col >= 0, lambda: f"col must be non-negative, got {col}") + torch._check( + dtype in (torch.int32, torch.int64), + lambda: f"\"{name}\" not implemented for '{dtype}'", + ) + + +# This is based on tril_indices_cuda in aten/src/ATen/native/cuda/TensorFactories.cu +@register_decomposition(aten.tril_indices) +@out_wrapper() +def tril_indices( + row: int, + col: int, + offset: int = 0, + *, + dtype: torch.dtype = torch.long, + layout: torch.layout = torch.strided, + device: DeviceLikeType = "cpu", + pin_memory: bool = False, +) -> TensorLikeType: + _trilu_checks("tril_indices", row, col, dtype, layout, pin_memory) + + trapezoid_size, rectangle_size, m_first_row = _get_tril_sizes(row, col, offset) + row_offset = max(0, -offset) + + arange_kw = partial( + torch.arange, layout=layout, device=device, pin_memory=pin_memory + ) + + # first we do the indices for top trapezoid + xs1 = arange_kw(0, trapezoid_size, dtype=torch.float64) + b = m_first_row - 0.5 + row_inds1 = torch.floor(-b + torch.sqrt(b * b + 2 * xs1)) + col_inds1 = torch.floor(xs1 - (2 * m_first_row - 1 + row_inds1) * row_inds1 * 0.5) + row_inds1 = _maybe_convert_to_dtype(row_inds1 + row_offset, dtype) + col_inds1 = _maybe_convert_to_dtype(col_inds1, dtype) + + # then bottom rectangle + xs2 = arange_kw(0, rectangle_size, dtype=dtype) + row_inds2 = xs2 // col + (col - m_first_row + 1 + row_offset) + col_inds2 = xs2 % col + + return torch.stack( + (torch.cat((row_inds1, row_inds2)), torch.cat((col_inds1, col_inds2))) + ) + + +# Similar to _get_tril_sizes above, but here there is a top trapezoid and +# a bottom rectangle instead. 
Note that you can't reduce this to +# _get_tril_sizes(col, row, -offset) because that would correspond to +# decomposing into a left trapezoid and right rectangle. +def _get_triu_sizes(row: int, col: int, offset: int) -> Tuple[int, int, int]: + if row == 0 or col == 0: + return 0, 0, 0 + + m_first_row = max(0, col - offset) if offset > 0 else col + + # Number of elements in top rectangle + rectangle_size = max(0, min(row, -offset) * col) + + # Number of elements in bottom trapezoid + trapezoid_size_tril, rectangle_size_tril, _ = _get_tril_sizes(row, col, offset - 1) + triu_size = row * col - (trapezoid_size_tril + rectangle_size_tril) + trapezoid_size = triu_size - rectangle_size + + return trapezoid_size, rectangle_size, m_first_row + + +@register_decomposition(aten.triu_indices) +@out_wrapper() +def triu_indices( + row: int, + col: int, + offset: int = 0, + *, + dtype: torch.dtype = torch.long, + layout: torch.layout = torch.strided, + device: DeviceLikeType = "cpu", + pin_memory: bool = False, +) -> TensorLikeType: + _trilu_checks("triu_indices", row, col, dtype, layout, pin_memory) + + trapezoid_size, rectangle_size, m_first_row = _get_triu_sizes(row, col, offset) + col_offset = max(0, offset) + + arange_kw = partial( + torch.arange, layout=layout, device=device, pin_memory=pin_memory + ) + + # indices for top rectangle + xs2 = arange_kw(0, rectangle_size, dtype=dtype) + row_inds2 = xs2 // col + col_inds2 = xs2 % col + + # bottom trapezoid + xs1 = arange_kw(0, trapezoid_size, dtype=torch.float64) + b = -0.5 - m_first_row + row_inds1 = torch.floor(-b - torch.sqrt(b * b - 2 * xs1)) + col_inds1 = torch.floor(xs1 - ((2 * m_first_row - 1 - row_inds1) * row_inds1) * 0.5) + row_inds1 = _maybe_convert_to_dtype(row_inds1, dtype) + col_inds1 = _maybe_convert_to_dtype(col_inds1, dtype) + + if col: + row_inds1 = row_inds1 + (rectangle_size // col) + col_inds1 = col_inds1 + col_offset + + return torch.stack( + (torch.cat((row_inds2, row_inds1)), torch.cat((col_inds2, col_inds1))) + ) + + +@register_decomposition(aten.bucketize) +@out_wrapper(exact_dtype=True) +def bucketize( + a: TensorLikeType, + boundaries: TensorLikeType, + *, + out_int32: bool = False, + right: bool = False, +): + torch._check( + boundaries.dim() == 1, + lambda: f"boundaries tensor must be 1 dimension but got dim({boundaries.dim()})", + ) + + out_dtype = torch.int32 if out_int32 else torch.int64 + n_boundaries = boundaries.shape[-1] + if n_boundaries == 0: + return torch.zeros_like(a) + # We are trying to find the bucket (defined by pairs of consecutive elements of `boundaries`) + # each element of `a` belongs to. 
We use binary search to achieve logarithmic complexity,
+    # but each step of the search is done "in parallel" over all elements of `a`
+    # can't use int32 as indexes, so we have to do all computations with int64 and convert at the end
+    start = torch.zeros(a.shape, device=a.device, dtype=torch.int64)
+    end = start + n_boundaries
+    # Max depth of the binary search
+    # Since we can't break out of the loop at different points for different elements of a,
+    # we just do the max amount of iterations that binary search requires and add condition
+    # tensor (cond_update below) to stop updating once the search terminates
+
+    # For first iteration through loop we can skip some checks, we have separate implementation
+    mid = start + (end - start) // 2
+    mid_val = boundaries[mid]
+    if right:
+        cond_mid = mid_val > a
+    else:
+        cond_mid = mid_val >= a
+    start = torch.where(cond_mid, start, mid + 1)
+
+    if n_boundaries > 1:
+        cond_update = torch.ones_like(a, dtype=torch.bool)
+        niters = int(math.log2(n_boundaries))
+        for _ in range(niters):
+            end = torch.where(cond_mid & cond_update, mid, end)
+            cond_update = start < end
+            # start might end up pointing to 1 past the end, we guard against that
+            mid = torch.where(cond_update, start + (end - start) // 2, 0)
+            mid_val = boundaries[mid]
+            # If right is true, the buckets are closed on the *left*
+            # (i.e., we are doing the equivalent of std::upper_bound in C++)
+            # Otherwise they are closed on the right (std::lower_bound)
+            if right:
+                cond_mid = mid_val > a
+            else:
+                cond_mid = mid_val >= a
+            start = torch.where((~cond_mid) & cond_update, mid + 1, start)
+
+    return start.to(dtype=out_dtype)
+
+
+@register_decomposition(aten.cauchy)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self",),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def cauchy(self, median=0, sigma=1, generator=None):
+    assert generator is None
+    torch._check(
+        not utils.is_complex_dtype(self.dtype)
+        and not utils.is_integer_dtype(self.dtype)
+        and not utils.is_boolean_dtype(self.dtype),
+        lambda: f"Cauchy distribution is a continuous probability distribution. \
+        dtype must be a floating point but you specified {self.dtype}",
+    )
+    torch._check(
+        sigma > 0.0,
+        lambda: f"cauchy_ expects sigma > 0.0, but found sigma={sigma}",
+    )
+    return median + sigma * torch.tan(math.pi * (torch.rand_like(self) - 0.5))
+
+
+@register_decomposition(aten.exponential)
+@out_wrapper()
+@elementwise_type_promotion_wrapper(
+    type_promoting_args=("self",),
+    type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
+)
+def exponential(self, rate=1, generator=None):
+    assert generator is None
+    torch._check(
+        not utils.is_complex_dtype(self.dtype)
+        and not utils.is_integer_dtype(self.dtype)
+        and not utils.is_boolean_dtype(self.dtype),
+        lambda: f"Exponential distribution is a continuous probability distribution. 
\ + dtype must be a floating point but you specified {self.dtype}", + ) + torch._check( + rate > 0.0, + lambda: f"exponential_ expects lambda > 0.0, but found lambda={rate}", + ) + return -1 / rate * torch.log1p(-torch.rand_like(self)) + + +@register_decomposition(aten.geometric) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def geometric(self, p, generator=None): + assert generator is None + # TODO: fix inductor rand_like for integer, bool dtypes + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"geometric not implemented for {self.dtype}", + ) + torch._check( + 0 < p and p < 1, + lambda: f"geometric_ expects p to be in (0, 1), but got p={p}", + ) + return torch.floor(torch.log1p(-torch.rand_like(self)) / math.log1p(-p)) + 1 + + +@register_decomposition(aten.log_normal) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def log_normal(self, mean=1, std=2, generator=None): + assert generator is None + torch._check( + not utils.is_complex_dtype(self.dtype) + and not utils.is_integer_dtype(self.dtype) + and not utils.is_boolean_dtype(self.dtype), + lambda: f"log_normal not implemented for {self.dtype}", + ) + torch._check( + 0 < std, + lambda: f"log_normal_ expects std > 0.0, but found std={std}", + ) + return torch.exp(std * torch.randn_like(self) + mean) + + +# TODO: add support for functionalization aten.normal_functional +# NOTE: the device and dtype will be ignored when shape is None +@register_decomposition(aten.normal) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=( + "mean", + "std", + ), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def normal( + mean=0, + std=1, + size=None, + *, + generator=None, + dtype=None, + layout=None, + device=None, + pin_memory=None, +): + assert layout is None or layout == torch.strided + + if not isinstance(std, TensorLike): + torch._check( + std >= 0, lambda: f"normal expects std >= 0.0, but found std {std}" + ) + + if size is None: + tensors = tuple(t for t in (mean, std) if isinstance(t, TensorLike)) + torch._check( + len(tensors) > 0, + lambda: "normal expects that either mean or std is a tensor, or size is defined", + ) + torch._check( + layout is None and pin_memory is None, + lambda: "Cannot pass layout, or pin_memory without size", + ) + + size = _broadcast_shapes(*(t.shape for t in tensors)) + dtype = tensors[0].dtype + device = tensors[0].device + else: + torch._check( + not isinstance(mean, TensorLike) and not isinstance(std, TensorLike), + lambda: "normal expects mean and std to be scalars when size is defined", + ) + dtype = torch.get_default_dtype() if dtype is None else dtype + device = torch.device("cpu") if device is None else device + + normal_samples = prims.normal( + size, + mean=0.0, + std=1.0, + dtype=dtype, + device=device, + requires_grad=False, + generator=generator, + ) + return std * normal_samples + mean + + +@register_decomposition(aten.normal_) +def normal_(self, mean=0, std=1, *, generator=None): + return normal(mean, std, self.shape, out=self, generator=generator) + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def rad2deg(self: TensorLikeType): + torch._check( + not utils.is_complex_dtype(self.dtype), + lambda: "rad2deg is not supported for complex tensors.", + ) 
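+    # 180 / pi, i.e. the factor that converts radians to degrees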
+ M_180_PI = 57.295779513082320876798154814105170332405472466564 + return self * M_180_PI + + +@_make_elementwise_unary_reference(ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) +def deg2rad(self: TensorLikeType): + torch._check( + not utils.is_complex_dtype(self.dtype), + lambda: "deg2rad is not supported for complex tensors.", + ) + M_PI_180 = 0.017453292519943295769236907684886127134428718885417 + return self * M_PI_180 + + +@register_decomposition(aten.count_nonzero) +@out_wrapper() +def count_nonzero(self, dim: Optional[DimsType] = None): + return (self != 0).sum(dim) + + +def _dot_check(self, other): + torch._check( + self.dim() == 1 and other.dim() == 1, + lambda: f"1D tensors expected, but got {self.dim()}D and {other.dim()}D tensors", + ) + + def numel_error(): + return ( + f"inconsistent tensor size, expected tensor [{self.numel()}] and src [{other.numel()}] to have the" + f"same number of elements, but got {self.numel()} and {other.numel()} elements respectively" + ) + + torch._check(self.numel() == other.numel(), numel_error) + + +@register_decomposition(aten.dot) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "other"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def dot(self, other): + if self.is_complex(): + if self.is_conj(): + if other.is_conj(): + return torch.dot(self.conj(), other.conj()).conj() + else: + return torch.vdot(self.conj(), other) + elif other.is_conj(): + return torch.vdot(other.conj(), self) + + _dot_check(self, other) + return (self * other).sum() + + +@register_decomposition(aten.vdot) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("self", "other"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def vdot(self, other): + if not self.is_complex(): + return torch.dot(self, other) + + if self.is_conj(): + if other.is_conj(): + return torch.vdot(other.conj(), self.conj()) + else: + return torch.dot(self.conj(), other) + elif other.is_conj(): + return torch.dot(self, other.conj()).conj() + + _dot_check(self, other) + # The decomposition fails if you do self.conj()... 
not sure why + return (self.conj_physical() * other).sum() + + +# inplace +abs_ = _make_inplace(abs) +acos_ = _make_inplace(acos) +acosh_ = _make_inplace(acosh) +add_ = _make_inplace(add) +addcmul_ = _make_inplace(addcmul) +addcdiv_ = _make_inplace(addcdiv) +asin_ = _make_inplace(asin) +asinh_ = _make_inplace(asinh) +atan_ = _make_inplace(atan) +atanh_ = _make_inplace(atanh) +atan2_ = _make_inplace(atan2) +bitwise_and_ = _make_inplace(bitwise_and) +bitwise_left_shift_ = _make_inplace(bitwise_left_shift) +bitwise_not_ = _make_inplace(bitwise_not) +bitwise_or_ = _make_inplace(bitwise_or) +bitwise_right_shift_ = _make_inplace(bitwise_right_shift) +bitwise_xor_ = _make_inplace(bitwise_xor) +ceil_ = _make_inplace(ceil) +clamp_ = _make_inplace(clamp) +clamp_min_ = _make_inplace(clamp_min) +clamp_max_ = _make_inplace(clamp_max) +conj_physical_ = _make_inplace(conj_physical) +copysign_ = _make_inplace(copysign) +cos_ = _make_inplace(cos) +cosh_ = _make_inplace(cosh) +cumsum_ = _make_inplace(cumsum) +cumprod_ = _make_inplace(cumprod) +deg2rad_ = _make_inplace(deg2rad) +digamma_ = _make_inplace(digamma) +div_ = _make_inplace(div) +eq_ = _make_inplace(eq) +erf_ = _make_inplace(erf) +erfc_ = _make_inplace(erfc) +erfinv_ = _make_inplace(erfinv) +exp_ = _make_inplace(exp) +exp2_ = _make_inplace(exp2) +expm1_ = _make_inplace(expm1) +float_power_ = _make_inplace(float_power) +floor_ = _make_inplace(floor) +floor_divide_ = _make_inplace(floor_divide) +fmod_ = _make_inplace(fmod) +frac_ = _make_inplace(frac) +gcd_ = _make_inplace(gcd) +ge_ = _make_inplace(ge) +gt_ = _make_inplace(gt) +heaviside_ = _make_inplace(heaviside) +hypot_ = _make_inplace(hypot) +igamma_ = _make_inplace(igamma) +igammac_ = _make_inplace(igammac) +i0_ = _make_inplace(i0) +lcm_ = _make_inplace(lcm) +le_ = _make_inplace(le) +lerp_ = _make_inplace(lerp) +lgamma_ = _make_inplace(lgamma) +log10_ = _make_inplace(log10) +log1p_ = _make_inplace(log1p) +log2_ = _make_inplace(log2) +log_ = _make_inplace(log) +logical_and_ = _make_inplace(logical_and) +logical_not_ = _make_inplace(logical_not) +logical_or_ = _make_inplace(logical_or) +logical_xor_ = _make_inplace(logical_xor) +lt_ = _make_inplace(lt) +mul_ = _make_inplace(mul) +mvlgamma_ = _make_inplace(mvlgamma) +nan_to_num_ = _make_inplace(nan_to_num) +ne_ = _make_inplace(ne) +neg_ = _make_inplace(neg) +nextafter_ = _make_inplace(nextafter) +pow_ = _make_inplace(pow) +rad2deg_ = _make_inplace(rad2deg) +reciprocal_ = _make_inplace(reciprocal) +remainder_ = _make_inplace(remainder) +rsqrt_ = _make_inplace(rsqrt) +sgn_ = _make_inplace(sgn) +sigmoid_ = _make_inplace(sigmoid) +sign_ = _make_inplace(sign) +sin_ = _make_inplace(sin) +sinc_ = _make_inplace(sinc) +sinh_ = _make_inplace(sinh) +sqrt_ = _make_inplace(sqrt) +square_ = _make_inplace(square) +sub_ = _make_inplace(sub) +tan_ = _make_inplace(tan) +tanh_ = _make_inplace(tanh) +tril_ = _make_inplace(tril) +triu_ = _make_inplace(triu) +true_divide_ = _make_inplace(true_divide) +trunc_ = _make_inplace(trunc) +xlogy_ = _make_inplace(xlogy) +cauchy_ = _make_inplace(cauchy) +exponential_ = _make_inplace(exponential) +geometric_ = _make_inplace(geometric) +log_normal_ = _make_inplace(log_normal) +zero_ = _make_inplace(zero) + + +# xref: isStorage in torch/csrc/DynamicTypes.cpp +def _isStorage(obj): + return isinstance(obj, (torch.TypedStorage, torch.UntypedStorage)) + + +# xref: compute_sizes in torch/csrc/utils/tensor_new.cpp +def _compute_sizes(seq, scalar_type): + MAX_DIMS = 128 + is_storage = _isStorage(seq) + sizes = [] + # TODO: this is 
inaccurate, we actually test PySequence_Check + while isinstance(seq, (list, tuple)): + length = len(seq) + if is_storage: + length //= scalar_type.itemsize + sizes.append(length) + if len(sizes) > MAX_DIMS: + raise ValueError(f"too many dimensions '{type(seq).__name__}'") + if length == 0: + break + try: + handle = seq[0] + except Exception: + raise ValueError( # noqa: TRY200 + f"could not determine the shape of object type '{type(seq).__name__}'" + ) + seq = handle + + return sizes + + +# xref: infer_scalar_type in torch/csrc/utils/tensor_new.cpp +def _infer_scalar_type(obj): + if isinstance(obj, FloatLike): + return torch.get_default_dtype() + if isinstance(obj, IntLike) and not isinstance(obj, bool): # careful! + return torch.int64 + if isinstance(obj, bool): + return torch.bool + if isinstance(obj, complex): + default_dtype = torch.get_default_dtype() + if default_dtype is torch.float: + return torch.cfloat + elif default_dtype is torch.double: + return torch.cdouble + else: + raise RuntimeError("invalid default scalar type for complex") + if isinstance(obj, torch.Tensor): + return obj.dtype + if isinstance(obj, str): + raise TypeError(f"new(): invalid data type '{type(obj).__name__}'") + # TODO: this is inaccurate, we actually test PySequence_Check + if isinstance(obj, (list, tuple)): + scalarType = None + length = len(obj) + # match NumPy semantics, except use default tensor type instead of + # double. + if length == 0: + return torch.get_default_dtype() + for i in range(length): + cur_item = obj[i] + # TODO: test this + """ + if cur_item is obj: + raise TypeError("new(): self-referential lists are incompatible") + """ + item_scalarType = _infer_scalar_type(cur_item) # recurse! + if scalarType is not None: + scalarType = torch.promote_types(scalarType, item_scalarType) + else: + scalarType = item_scalarType + if scalarType is torch.cdouble: + # this won't change (unless we hit undefined, but that will + # fail later) + return scalarType + return scalarType + raise RuntimeError(f"Could not infer dtype of {type(obj).__name__}") + + +# Analogous to recursive_store +# xref: recursive_store in torch/csrc/utils/tensor_new.cpp +def _recursive_build(scalarType: torch.dtype, obj: TensorOrNumberLikeType): + if isinstance(obj, Tensor) and obj.ndim <= 1: + obj = obj.item() + # fall through into next case + if isinstance(obj, Number): + return torch.scalar_tensor(obj, dtype=scalarType) + + seq = obj + return torch.stack([_recursive_build(scalarType, item) for item in seq]) + + +# xref: internal_new_from_data in torch/csrc/utils/tensor_new.cpp +def _internal_new_from_data( + options, + scalar_type, + device_opt, + data, + copy_variables, + copy_numpy, + type_inference, + pin_memory=False, +): + if isinstance(data, torch.Tensor): + torch._check( + not pin_memory, lambda: "Can't pin tensor constructed from a variable" + ) + var = data + if copy_variables: + var = var.detach() + inferred_scalar_type = var.dtype if type_inference else scalar_type + device = device_opt if device_opt is not None else var.device + return var.to( + device=device, + dtype=inferred_scalar_type, + non_blocking=False, + copy=copy_variables, + ) + + # TODO + if hasattr(data, "__cuda_array_interface__"): + return NotImplemented + + # TODO: test for numpy input with PyArray_Check + + device = device_opt if device_opt is not None else options["device"] + inferred_scalar_type = _infer_scalar_type(data) if type_inference else scalar_type + + # NB: Don't need to avoid tracing, as we aren't going to do any manual + # pointer filling 
tricks + if _isStorage(data): + return NotImplemented + else: + if torch.device(device).type == "meta": + return NotImplemented + + # In the C implementation, we would directly start poking the memory + # of a freshly allocated CPU tensor. Here, we're going to do an + # alternate, heinously slow implementation: turn each individual + # scalar into a tensor, and then repeatedly cat them together + tensor = _recursive_build(inferred_scalar_type, data) + + tensor = tensor.to(device, inferred_scalar_type, non_blocking=False, copy=False) + + # NB: lift_fresh is not needed, because we built the tensor from scalars + # guaranteeing a fresh tensor in this case + return tensor + + +# xref: tensor_ctor in torch/csrc/utils/tensor_new.cpp +def tensor(data, *, dtype=None, device=None, pin_memory=False, requires_grad=False): + # TODO (or not): support names kwarg + if isinstance(data, torch.Tensor): + warnings.warn( + "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach() " + "or sourceTensor.clone().detach().requires_grad_(True), rather than torch.tensor(sourceTensor)" + ) + type_inference = dtype is None + new_tensor = _internal_new_from_data( + # device="cpu" because that's what you get with torch.tensor(2) no + # device by default + {"device": "cpu"}, # TODO: use torch.get_default_tensor_type + dtype if dtype is not None else torch.get_default_dtype(), + device, + data, + copy_variables=True, + copy_numpy=True, + type_inference=type_inference, + pin_memory=pin_memory, + ) + new_tensor.detach_() + new_tensor.requires_grad_(requires_grad) + return new_tensor + + +# Views +# We can't model these as above, as the pattern of doing `op(a, out=a)` does not work for a view function +# given that it does not reshape the input (it just copies the result into it) + +# squeeze_ = _make_inplace(squeeze) +# t_ = _make_inplace(t) +# transpose_ = _make_inplace(transpose) +# unsqueeze_ = _make_inplace(unsqueeze) + + +import torch._refs._conversions +import torch._refs.fft +import torch._refs.linalg +import torch._refs.nn.functional +import torch._refs.special diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7e1ba7c73bfe082e175f9cd3043eda178adcc49 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_refs/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..688a7e8c896648aa89dc6381e42f077363183200 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_refs/__pycache__/_conversions.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f283c410d5145841b59b53510e69b7fa332a7e1e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_refs/__pycache__/fft.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/_conversions.py b/llmeval-env/lib/python3.10/site-packages/torch/_refs/_conversions.py new file 
mode 100644 index 0000000000000000000000000000000000000000..fa1ca2428255aa9fe3892328f6ab95cc5f5b7568 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_refs/_conversions.py @@ -0,0 +1,118 @@ +import torch +import torch._prims_common as utils + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition + +from torch._prims_common import TensorLikeType +from torch._prims_common.wrappers import out_wrapper +from torch._refs import _broadcast_shapes + +# Data conversion references. +# +# Note: this module breaks the usual _refs to torch naming scheme where +# _refs.foo.bar is a ref for torch.foo.bar. The following definitions are not +# part of _refs/__init__.py to avoid name clashes with Python builtin types +# (like int). + +__all__ = [ + # dtypes + "bfloat16", + "bool", + "byte", + "cdouble", + "cfloat", + "chalf", + "char", + "double", + "float", + "half", + "int", + "long", + "short", + # misc + "complex", + "polar", +] + + +def _make_conversion_method(name: str, dtype: torch.dtype): + def fn( + self: TensorLikeType, memory_format: torch.memory_format = torch.preserve_format + ) -> TensorLikeType: + return self.to(dtype, memory_format=memory_format) # type: ignore[call-overload] + + fn.__name__ = name + return fn + + +bfloat16 = _make_conversion_method("bfloat16", torch.bfloat16) + +bool = _make_conversion_method("bool", torch.bool) + +byte = _make_conversion_method("byte", torch.uint8) + +cdouble = _make_conversion_method("cdouble", torch.cdouble) + +cfloat = _make_conversion_method("cfloat", torch.cfloat) + +chalf = _make_conversion_method("chalf", torch.complex32) + +char = _make_conversion_method("char", torch.int8) + +double = _make_conversion_method("double", torch.double) + +float = _make_conversion_method("float", torch.float) + +half = _make_conversion_method("half", torch.half) + +int = _make_conversion_method("int", torch.int) + +long = _make_conversion_method("long", torch.long) + +short = _make_conversion_method("short", torch.short) + + +@register_decomposition(torch._ops.ops.aten.complex) +# Note: complex has type promotion tests disabled due to different semantics. +# exact_dtype is for compat with complex_check_dtype from core. +@out_wrapper(exact_dtype=True) +def complex(real: TensorLikeType, imag: TensorLikeType) -> TensorLikeType: + allowed_dtypes = (torch.float32, torch.float64, torch.float16) + torch._check( + real.dtype in allowed_dtypes and imag.dtype in allowed_dtypes, + lambda: ( + f"Expected both inputs to be Half, Float or Double tensors but got " + f"{real.dtype} and {imag.dtype}" + ), + ) + torch._check( + real.dtype == imag.dtype, + lambda: ( + f"Expected object of scalar type {real.dtype} but got " + f"scalar type {imag.dtype} for second argument" + ), + ) + result_dtype = utils.corresponding_complex_dtype(real.dtype) # type: ignore[arg-type] + common_shape = _broadcast_shapes(real.shape, imag.shape) + result = real.new_empty( + common_shape, + dtype=result_dtype, + layout=real.layout, + device=real.device, + # pin_memory=real.is_pinned(), # NYI + ) + result.real = real + result.imag = imag + return result + + +@register_decomposition(torch._ops.ops.aten.polar) +# Note: polar has type promotion tests disabled due to different semantics. +# exact_dtype is for compat with complex_check_dtype from core. 
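+# polar(abs, angle) assembles a complex tensor from polar coordinates:
+# real = abs * cos(angle) and imag = abs * sin(angle), as computed below.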
+@out_wrapper(exact_dtype=True) +def polar(abs: TensorLikeType, angle: TensorLikeType) -> TensorLikeType: + result = torch.complex(abs, angle) + result.real = abs * torch.cos(angle) + result.imag = abs * torch.sin(angle) + return result diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b948e1eccc0b785eeccd000b39b9d68725b39e17 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_refs/linalg/__init__.py @@ -0,0 +1,308 @@ +from functools import partial + +from typing import List, Optional, Tuple, Union + +import torch + +import torch._prims as prims + +import torch._prims_common as utils +import torch._refs as refs +import torch._refs.linalg as linalg +from torch import Tensor +from torch._prims_common import ( + check_fp_or_complex, + check_is_matrix, + Dim, + DimsType, + ELEMENTWISE_TYPE_PROMOTION_KIND, + IntLike, + NumberType, + TensorLikeType, +) +from torch._prims_common.wrappers import ( + _maybe_convert_to_dtype, + elementwise_type_promotion_wrapper, + out_wrapper, +) + + +__all__ = [ + "diagonal", + "matrix_norm", + "norm", + "svd", + "svdvals", + "vector_norm", + "vecdot", + "cross", +] + + +def _check_norm_dtype(dtype: Optional[torch.dtype], x_dtype: torch.dtype, fn_name: str): + """ + Checks related to the dtype kwarg in `linalg.*norm` functions + """ + if dtype is not None: + torch._check( + utils.is_float_dtype(dtype) or utils.is_complex_dtype(dtype), + lambda: f"{fn_name}: dtype should be floating point or complex. Got {dtype}", + ) + torch._check( + utils.is_complex_dtype(dtype) == utils.is_complex_dtype(x_dtype), + lambda: "{fn_name}: dtype should be {d} for {d} inputs. 
Got {dtype}".format( + fn_name=fn_name, + d="complex" if utils.is_complex_dtype(x_dtype) else "real", + dtype=dtype, + ), + ) + torch._check( + utils.get_higher_dtype(dtype, x_dtype) == dtype, + lambda: f"{fn_name}: the dtype of the input ({x_dtype}) should be convertible " + "without narrowing to the specified dtype ({dtype})", + ) + + +# Utilities should come BEFORE this import +from torch._decomp import register_decomposition +from torch._decomp.decompositions import pw_cast_for_opmath + + +@register_decomposition(torch._ops.ops.aten.linalg_cross) +@out_wrapper() +@pw_cast_for_opmath +def cross(a: Tensor, b: Tensor, dim: int = -1): + torch._check( + a.ndim == b.ndim, + lambda: "linalg.cross: inputs must have the same number of dimensions.", + ) + torch._check( + a.size(dim) == 3 and b.size(dim) == 3, + lambda: f"linalg.cross: inputs dim {dim} must have length 3, got {a.size(dim)} and {b.size(dim)}", + ) + a, b = torch.broadcast_tensors(a, b) + dim = utils.canonicalize_dim(a.ndim, dim) + idx = torch.arange(3, device=a.device) + return a.index_select(dim, (idx + 1) % 3) * b.index_select( + dim, (idx + 2) % 3 + ) - a.index_select(dim, (idx + 2) % 3) * b.index_select(dim, (idx + 1) % 3) + + +def diagonal( + input: TensorLikeType, + *, + offset: int = 0, + dim1: int = -2, + dim2: int = -1, +) -> TensorLikeType: + return torch.diagonal(input, offset=offset, dim1=dim1, dim2=dim2) + + +@register_decomposition(torch._ops.ops.aten.linalg_vector_norm) +@out_wrapper(exact_dtype=True) +def vector_norm( + x: TensorLikeType, + ord: Union[float, int] = 2, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> Tensor: + # Checks + check_fp_or_complex(x.dtype, "linalg.vector_norm") + + if isinstance(dim, Dim): + dim = [dim] # type: ignore[assignment] + + if x.numel() == 0 and (ord < 0.0 or ord == float("inf")): + torch._check( + dim is not None and len(dim) != 0, + lambda: f"linalg.vector_norm cannot compute the {ord} norm on an empty tensor " + "because the operation does not have an identity", + ) + shape = x.shape + assert dim is not None # mypy does not seem to be able to see through check? 
+ for d in dim: + torch._check( + shape[d] != 0, + lambda: f"linalg.vector_norm cannot compute the {ord} norm on the " + f"dimension {d} because this dimension is empty and the " + "operation does not have an identity", + ) + _check_norm_dtype(dtype, x.dtype, "linalg.vector_norm") + + computation_dtype, result_dtype = utils.reduction_dtypes( + x, utils.REDUCTION_OUTPUT_TYPE_KIND.COMPLEX_TO_FLOAT, dtype + ) + + to_result_dtype = partial(_maybe_convert_to_dtype, dtype=result_dtype) + + # Implementation + if ord == 0.0: + return torch.sum(torch.ne(x, 0.0), dim=dim, keepdim=keepdim, dtype=result_dtype) + elif ord == float("inf"): + return to_result_dtype(torch.amax(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type] + elif ord == float("-inf"): + return to_result_dtype(torch.amin(torch.abs(x), dim=dim, keepdim=keepdim)) # type: ignore[return-value,arg-type] + else: + # From here on the computation dtype is important as the reduction is non-trivial + x = _maybe_convert_to_dtype(x, computation_dtype) # type: ignore[assignment] + reduce_sum = partial(torch.sum, dim=dim, keepdim=keepdim) + + is_ord_even = ord % 2 == 0 if isinstance(ord, IntLike) else ord % 2.0 == 0.0 + if not (is_ord_even and utils.is_float_dtype(x.dtype)): + x = torch.abs(x) + return to_result_dtype(torch.pow(reduce_sum(torch.pow(x, ord)), 1.0 / ord)) # type: ignore[return-value] + + +def _backshift_permutation(dim0, dim1, ndim): + # Auxiliary function for matrix_norm + # Computes the permutation that moves the two given dimensions to the back + ret = [i for i in range(ndim) if i != dim0 and i != dim1] + ret.extend((dim0, dim1)) + return ret + + +def _inverse_permutation(perm): + # Given a permutation, returns its inverse. It's equivalent to argsort on an array + return [i for i, j in sorted(enumerate(perm), key=lambda i_j: i_j[1])] + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def matrix_norm( + A: TensorLikeType, + ord: Union[float, str] = "fro", + dim: DimsType = (-2, -1), + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # shape + check_is_matrix(A, "linalg.matrix_norm") + # dim + dim = utils.canonicalize_dims(A.ndim, dim) + if isinstance(dim, Dim): + dim = (dim,) # type: ignore[assignment] + torch._check( + len(dim) == 2, lambda: "linalg.matrix_norm: dim must be a 2-tuple. Got {dim}" + ) + torch._check( + dim[0] != dim[1], + lambda: "linalg.matrix_norm: dims must be different. 
Got ({dim[0]}, {dim[1]})", + ) + # dtype arg + _check_norm_dtype(dtype, A.dtype, "linalg.matrix_norm") + + if isinstance(ord, str): + # ord + torch._check( + ord in ("fro", "nuc"), + lambda: "linalg.matrix_norm: Order {ord} not supported.", + ) + # dtype + check_fp_or_complex( + A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != "nuc" + ) + + if ord == "fro": + return vector_norm(A, 2, dim, keepdim, dtype=dtype) + else: # ord == "nuc" + if dtype is not None: + A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment] + perm = _backshift_permutation(dim[0], dim[1], A.ndim) + result = torch.sum(svdvals(prims.transpose(A, perm)), -1, keepdim) + if keepdim: + inv_perm = _inverse_permutation(perm) + result = prims.transpose(torch.unsqueeze(result, -1), inv_perm) + return result + else: + # ord + abs_ord = abs(ord) + torch._check( + abs_ord in (2, 1, float("inf")), + lambda: "linalg.matrix_norm: Order {ord} not supported.", + ) + # dtype + check_fp_or_complex( + A.dtype, "linalg.matrix_norm", allow_low_precision_dtypes=ord != 2 + ) + + max_min = partial(torch.amax if ord > 0.0 else torch.amin, keepdim=keepdim) + + if abs_ord == 2.0: + if dtype is not None: + A = _maybe_convert_to_dtype(A, dtype) # type: ignore[assignment] + perm = _backshift_permutation(dim[0], dim[1], A.ndim) + result = max_min(svdvals(prims.transpose(A, perm)), dim=-1) + if keepdim: + inv_perm = _inverse_permutation(perm) + result = prims.transpose(torch.unsqueeze(result, -1), inv_perm) + return result + else: # 1, -1, inf, -inf + dim0, dim1 = dim + if abs_ord == float("inf"): + dim0, dim1 = dim1, dim0 + if not keepdim and (dim0 < dim1): + dim1 -= 1 + return max_min( + vector_norm(A, 1.0, dim=dim0, keepdim=keepdim, dtype=dtype), dim1 + ) + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def norm( + A: TensorLikeType, + ord: Optional[Union[float, str]] = None, + dim: Optional[DimsType] = None, + keepdim: bool = False, + *, + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + if dim is not None: + if isinstance(dim, Dim): + dim = (dim,) # type: ignore[assignment] + torch._check( + len(dim) in (1, 2), + lambda: "linalg.norm: If dim is specified, it must be of length 1 or 2. Got {dim}", + ) + elif ord is not None: + torch._check( + A.ndim in (1, 2), + lambda: "linalg.norm: If dim is not specified but ord is, the input must be 1D or 2D. 
Got {A.ndim}D", + ) + + if ord is not None and ( + (dim is not None and len(dim) == 2) or (dim is None and A.ndim == 2) + ): + if dim is None: + dim = (0, 1) + return matrix_norm(A, ord, dim, keepdim, dtype=dtype) + else: + if ord is None: + ord = 2.0 + return vector_norm(A, ord, dim, keepdim, dtype=dtype) + + +# CompositeImplicitAutograd +@out_wrapper("U", "S", "Vh", exact_dtype=True) +def svd(A: TensorLikeType, full_matrices: bool = True) -> Tuple[Tensor, Tensor, Tensor]: + return prims.svd(A, full_matrices=full_matrices) + + +# CompositeImplicitAutograd +@out_wrapper(exact_dtype=True) +def svdvals(A: TensorLikeType) -> Tensor: + return svd(A, full_matrices=False)[1] + + +# CompositeImplicitAutograd +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("x", "y"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def vecdot(x: Tensor, y: Tensor, dim: int = -1) -> Tensor: + check_fp_or_complex(x.dtype, "linalg.vecdot") + return (x.conj() * y).sum(dim=dim) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..78a17b0e8689b9902dac5c797742792f501ba223 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_refs/linalg/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3646b96be90be51e86070360b23212ed450186a6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/__init__.py @@ -0,0 +1,3 @@ +from typing import List + +__all__: List[str] = [] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d6be2180e8b34cb1fdac215e6d885f9450c5103f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/functional/__init__.py @@ -0,0 +1,1230 @@ +import math +from functools import wraps +from typing import Callable, Optional, Union + +import torch +import torch._prims as prims +import torch._prims_common as utils +import torch._refs as refs +from torch._decomp import register_decomposition +from torch._prims_common import ( + ELEMENTWISE_TYPE_PROMOTION_KIND, + NumberType, + ShapeType, + TensorLike, + TensorLikeType, +) +from torch._prims_common.wrappers import ( + elementwise_type_promotion_wrapper, + elementwise_unary_scalar_wrapper, + out_wrapper, +) +from torch._refs import _make_inplace + +__all__ = [ + "alpha_dropout", + "celu", + "celu_", + "dropout", + "elu", + "elu_", + "gelu", + "glu", + "group_norm", + "hardshrink", + "hardtanh", + "hinge_embedding_loss", + "huber_loss", + "l1_loss", + "layer_norm", + "leaky_relu", + "log_softmax", + "margin_ranking_loss", + "mish", + "mish_", + "mse_loss", + "nll_loss", + "pairwise_distance", + "pdist", + "poisson_nll_loss", + "prelu", + "relu", + "relu6", + "selu", + "selu_", + "smooth_l1_loss", + "softmax", + "softmin", + "softplus", + "softshrink", + "tanhshrink", + "threshold", + "threshold_", + "triplet_margin_loss", +] + +Tensor = torch.Tensor +aten = torch._ops.ops.aten +DispatchKey = torch._C.DispatchKey # type: ignore[attr-defined] + + +def 
_dropout_helper( + self: TensorLikeType, + val: float, +) -> TensorLikeType: + """ + Helper function for all dropout-type operators. During training, + some of the elements of the input tensor are randomly masked. + + Returns the masked tensor of the boolean values. + + """ + + return ( + refs._uniform_helper( + self.shape, low=0.0, high=1.0, dtype=torch.float32, device=self.device + ) + < val + ) + + +@register_decomposition(aten.alpha_dropout) +def alpha_dropout( + self: TensorLikeType, p: float = 0.5, training: bool = False, inplace: bool = False +) -> TensorLikeType: + if inplace: + raise NotImplementedError + + if not training: + return self + + torch._check( + p <= 1 and p >= 0, + lambda: f"dropout probability has to be between 0 and 1, but got, {p}", + ) + + if p == 1: + return torch.zeros_like(self) + + if p == 0: + return self + + dropout_mask = _dropout_helper(self, 1 - p) + + # From paper: Self-Normalizing Neural Networks (https://arxiv.org/pdf/1706.02515.pdf) + # alpha = - SELU.alpha * SELU.scale, here + # SELU.alpha = 1.6732632423543772848170429916717 and + # SELU.scale = 1.0507009873554804934193349852946 + alpha = -1.7580993408473766 + + a = 1.0 / math.sqrt((alpha * alpha * p + 1) * (1 - p)) + b = torch.logical_not(dropout_mask) + b = b * (alpha * a) + alpha * a * p + dropout_mask = a * dropout_mask + + return self * dropout_mask + b + + +def _inplace_wrapper(fn): + """ + Given a nn.functional non-linearity, implements its `inplace: bool` argument + """ + + # nb. We use the name of the first argument used in the unary references + @wraps(fn) + def _fn(a, *args, inplace=False, **kwargs): + if inplace: + torch._check( + "out" not in kwargs, + lambda: "Cannot set inplace=True and pass out= at the same time", + ) + return fn(a, *args, inplace=False, out=a, **kwargs) + else: + return fn(a, *args, inplace=False, **kwargs) + + return _fn + + +# celu is implemented specially because it has an alpha argument +# celu is very similar to elu +@register_decomposition(aten.celu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def celu( + a: TensorLikeType, alpha: Optional[NumberType] = None, inplace: bool = False +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.celu + """ + + if inplace: + raise NotImplementedError + + rhs: TensorLikeType + if alpha is not None: + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(alpha), python_type): + msg = f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!" 
+ raise ValueError(msg) + rhs = alpha * torch.expm1(torch.true_divide(a, alpha)) # type: ignore[arg-type] + else: + rhs = torch.expm1(a) + + return torch.where(a > 0, a, rhs) + + +@_inplace_wrapper +@out_wrapper() +def dropout( + a: TensorLikeType, p: float = 0.5, training: bool = True, inplace: bool = False +) -> TensorLikeType: + if inplace: + raise NotImplementedError + + if not training: + return a + + torch._check( + p <= 1 and p >= 0, + lambda: f"dropout probability has to be between 0 and 1, but got, {p}", + ) + + if p == 1: + return torch.zeros_like(a) + + if p == 0: + return a + + scale = 1 / (1 - p) + dropout_mask = _dropout_helper(a, 1 - p) + + return a * dropout_mask * scale + + +@register_decomposition(aten.elu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def elu( + a: TensorLikeType, + alpha: NumberType = 1.0, + scale: NumberType = 1.0, + input_scale: NumberType = 1.0, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.elu + """ + if inplace: + raise NotImplementedError + + # nb. This should be factored out into a can_cast aux function + python_type = utils.dtype_to_type(a.dtype) + torch._check( + utils.is_weakly_lesser_type(type(input_scale), python_type), + lambda: f"input_scale argument of type {type(input_scale)} cannot be safely cast to type {python_type}!", + ) + torch._check( + utils.is_weakly_lesser_type(type(scale), python_type), + lambda: f"scale argument of type {type(scale)} cannot be safely cast to type {python_type}!", + ) + torch._check( + utils.is_weakly_lesser_type(type(alpha), python_type), + lambda: f"alpha argument of type {type(alpha)} cannot be safely cast to type {python_type}!", + ) + + return torch.where(a > 0, scale * a, (alpha * scale) * torch.expm1(a * input_scale)) + + +@register_decomposition(aten.relu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def relu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.relu + """ + + if inplace: + raise NotImplementedError + + return torch.where(torch.le(a, 0), 0, a) + + +def group_norm( + input: Tensor, + num_groups: int, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +) -> Tensor: + """ + Reference implementation of :func:`torch.nn.functional.group_norm`. 
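+
+    Checks that the channel dimension is divisible by ``num_groups``, flattens the
+    trailing dimensions, and delegates to ``torch.native_group_norm``.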
+ """ + torch._check( + input.ndim >= 2, + lambda: f"Expected at least 2 dimensions for input tensor but received {input.ndim}", + ) + + batch_size = input.shape[0] + num_channels = input.shape[1] + torch._check( + num_channels % num_groups == 0, + lambda: "Expected number of channels in input to be divisible by num_groups, " + + f"but got input of shape {input.shape} and num_groups = {num_groups}", + ) + + # input shape is (N, C, *), so we flatten all inner dimensions except (N, C) + flattened_inner_size = 1 + for dim_length in input.shape[2:]: + flattened_inner_size *= dim_length + + return torch.native_group_norm( + input, + weight, + bias, + batch_size, + num_channels, + flattened_inner_size, + num_groups, + eps, + )[0] + + +def layer_norm( + input: Tensor, + normalized_shape: ShapeType, + weight: Optional[Tensor] = None, + bias: Optional[Tensor] = None, + eps: float = 1e-5, +) -> Tensor: + """ + Reference implementation of :func:`torch.nn.functional.layer_norm`. + """ + return torch.native_layer_norm(input, normalized_shape, weight, bias, eps)[0] + + +@register_decomposition(aten.leaky_relu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def leaky_relu( + a: TensorLikeType, negative_slope: float = 0.01, inplace: bool = False +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.leaky_relu + """ + + if inplace: + raise NotImplementedError + + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(negative_slope), python_type): + msg = f"negative_slope argument of type {type(negative_slope)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + return torch.where(torch.gt(a, 0), a, torch.mul(a, negative_slope)) + + +@register_decomposition(aten.mish) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def mish(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.mish + """ + + if inplace: + raise NotImplementedError + return a * torch.tanh(torch.nn.functional.softplus(a)) + + +@register_decomposition(aten.selu) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def selu(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.selu + """ + if inplace: + raise NotImplementedError + + alpha = 1.6732632423543772848170429916717 + scale = 1.0507009873554804934193349852946 + + rhs = alpha * torch.expm1(a) + + return scale * torch.where(a > 0, a, rhs) + + +# Forwarding alias: the functional variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def softmax( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. 
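+    # The deprecated implicit behaviour chose dim from the input's rank; callers
+    # must now be explicit, e.g. torch.softmax(x, dim=-1).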
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# CompositeImplicitAutograd - don't register decomp +def softmin( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. + torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.softmax(a=-a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +# softplus is implemented specially because it has beta and threshold arguments +@register_decomposition(aten.softplus) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def softplus( + a: TensorLikeType, + beta: Optional[NumberType] = None, + threshold: NumberType = 20, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.softplus + """ + + if inplace: + raise NotImplementedError + + rhs: TensorLikeType + if beta is not None: + python_type = utils.dtype_to_type(a.dtype) + if not utils.is_weakly_lesser_type(type(beta), python_type): + msg = f"beta argument of type {type(beta)} cannot be safely cast to type {python_type}!" + raise ValueError(msg) + scaled_input = a * beta + rhs = torch.true_divide(torch.log1p(torch.exp(scaled_input)), beta) # type: ignore[arg-type] + + else: + scaled_input = a + rhs = torch.log1p(torch.exp(scaled_input)) + + return torch.where(scaled_input > threshold, a, rhs) + + +@aten.hardshrink.default.py_impl(DispatchKey.Autograd) +@register_decomposition(aten.hardshrink) +@out_wrapper() +def hardshrink(a: TensorLikeType, lambd: float = 0.5): + # Formula for reference, + # hardshrink(x) = x if x > lambd + # = x if x < -lambd + # = 0 otherwise + return torch.where(torch.abs(a) <= lambd, 0, a) + + +@aten.softshrink.default.py_impl(DispatchKey.Autograd) +@register_decomposition(aten.softshrink) +@out_wrapper() +def softshrink(a: TensorLikeType, lambd: float = 0.5): + # Formula for reference, + # softshrink(x) = x - lambd if x > lambd + # = x + lambd if x < -lambd + # = 0 otherwise + torch._check( + lambd >= 0, + lambda: f"lambda must be greater or equal to 0, but found to be {lambd}", + ) + # We implement this in one torch.where to generate better code in the backward + # see https://github.com/pytorch/pytorch/pull/107052#discussion_r1293748211 + return torch.where(torch.abs(a) > lambd, a - torch.sign(a) * lambd, 0) + + +# Losses +def _reduction_int_to_str(reduction: int) -> str: + from torch._decomp.decompositions import Reduction + + if reduction == Reduction.NONE.value: + return "none" + elif reduction == Reduction.MEAN.value: + return "mean" + elif reduction == Reduction.SUM.value: + return "sum" + else: + raise ValueError(f"{reduction} is not a valid value for reduction") + + +def _apply_loss_reduction(loss: TensorLikeType, reduction: str) -> TensorLikeType: + if reduction == "sum": + return torch.sum(loss) + elif reduction == "mean": + return torch.mean(loss) + else: # reduction == "none" + return loss + + +def _check_reduction_value(reduction: str): + if reduction not in 
("mean", "sum", "none"): + raise ValueError(f"{reduction} is not a valid value for reduction") + + +# This helper function maps depreciated arguments, "size_average" and "reduce" +# to their corresponding "reduction" string argument +def _get_string_reduction_arg( + *, size_average: Optional[bool], reduce: Optional[bool] +) -> str: + if size_average is None: + size_average = True + if reduce is None: + reduce = True + if size_average and reduce: + ret = "mean" + elif reduce: + ret = "sum" + else: + ret = "none" + return ret + + +# CompositeImplicitAutograd - don't register decomp +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def l1_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.l1_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + loss = torch.abs(input - target) + return _apply_loss_reduction(loss, reduction) + + +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def smooth_l1_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", + beta: float = 1.0, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.smooth_l1_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + + if beta == 0.0: + return torch.nn.functional.l1_loss( + input, target, size_average=size_average, reduce=reduce, reduction=reduction + ) + else: + loss = torch.abs(input - target) + loss = torch.where(loss < beta, 0.5 * loss**2 / beta, loss - 0.5 * beta) + return _apply_loss_reduction(loss, reduction) + + +# Forwarding alias: the functional variant doesn't support the out kwarg +# CompositeImplicitAutograd - don't register decomp +def log_softmax( + a: TensorLikeType, + dim: Optional[int] = None, + _stacklevel: int = 3, # for compat when using TorchRefsMode(strict=True) + dtype: Optional[torch.dtype] = None, +) -> TensorLikeType: + # The error is for compat with regular PyTorch, which has this behavior + # deprecated. For PrimTorch, it's fine to drop support for deprecated + # behavior because it requires explicit opt in. This error is to inform + # users how to update their calls. 
+ torch._check(dim is not None, lambda: "implicit dim not supported, use dim=X") + return torch.log_softmax(a=a, dim=dim, dtype=dtype) # type: ignore[call-overload] + + +@register_decomposition(aten.margin_ranking_loss) +def margin_ranking_loss( + input1: TensorLikeType, + input2: TensorLikeType, + target: TensorLikeType, + margin: float = 0.0, + reduction: str = "mean", +) -> TensorLikeType: + # loss_without_reduction = max(0, −target * (input1 − input2) + margin) + if input1.ndim != input2.ndim or input1.ndim != target.ndim: + raise RuntimeError( + "margin_ranking_loss : All input tensors should have same dimension but got sizes: " + f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} " + ) + _check_reduction_value(reduction) + loss = torch.clamp_min(-target * (input1 - input2) + margin, 0) + return _apply_loss_reduction(loss, reduction) + + +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT, +) +def mse_loss( + input: TensorLikeType, + target: TensorLikeType, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + loss = torch.pow(input - target, 2) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.hinge_embedding_loss) +def hinge_embedding_loss( + input: TensorLikeType, + target: TensorLikeType, + margin: float = 1.0, + reduction: str = "mean", +) -> TensorLikeType: + # loss_without_reduction = input if y == 1 + # = max(0, margin - input) if y == -1 + _check_reduction_value(reduction) + margin_clamp = torch.clamp_min(margin - input, 0) + output_margin = torch.where(target != 1, margin_clamp, 0) + output_self = torch.where(target != -1, input, 0) + loss = output_margin + output_self + return _apply_loss_reduction(loss, reduction) + + +def _nll_loss_nd( + input: TensorLikeType, + target: TensorLikeType, + weight: Optional[TensorLikeType], + reduction: str, + ignore_index: int, +) -> TensorLikeType: + torch._check( + input.ndim > 0 and input.ndim <= 3, + lambda: f"Expected input dimension to be either [1, 2, 3] but received {input.ndim}.", + ) + + torch._check( + (input.ndim == 1) or (input.shape[0] == target.shape[0]), + lambda: f"Expected input batch size {input.shape[0]} to match target batch size {target.shape[0]}.", + ) + + _check_reduction_value(reduction) + + flat_target = torch.flatten(target) + ignore_classes_mask = torch.eq(flat_target, ignore_index) + + # TODO: Enable data-dependent checks with debug mode + # TODO: This check does not work with FakeTensor inputs; See Issue #85834 + # Explicit cast for class_check to bool; See Issue #78071 + """ + from torch._subclasses.fake_tensor import FakeTensor + num_classes = input.shape[1] if input.ndim > 1 else input.shape[0] + valid_classes_mask = torch.logical_and( + (flat_target >= 0), (flat_target < num_classes) + ) + class_check = torch.all(torch.logical_or(ignore_classes_mask, valid_classes_mask)) + torch._check( + isinstance(target, FakeTensor) or bool(class_check.item()), + lambda: "A target class is out-of-bounds and not 
the ignore index.", + ) + """ + + ignore_class_weight = torch.scalar_tensor(0, dtype=input.dtype, device=input.device) + class_weight = ( + torch.scalar_tensor(1, dtype=input.dtype, device=input.device) + if weight is None + else weight[flat_target] + ) + current_weight = torch.where( + ignore_classes_mask, + ignore_class_weight, + class_weight, + ) + + if input.ndim == 1: + # implicit batch size = 1 + # input (1 batch size, C classes) + loss = -input[target] * current_weight + elif input.ndim == 2: + # input (N batch size, C classes) + batch_size = input.shape[0] + loss = -input[torch.arange(batch_size), target] * current_weight + else: + # 3D case (N batch size, C classe, K dimensions) + # input (N batch size, C classes, K) + batch_size = input.shape[0] + extent = input.shape[2] + numel = batch_size * extent + indices = torch.arange(numel) + bdx = indices // extent + kdx = indices % extent + loss = -input[bdx, flat_target, kdx] * current_weight + loss = torch.reshape(loss, target.shape) + + if reduction == "none": + return loss + elif reduction == "sum": + return torch.sum(loss) + else: + # calculate weighted mean of the loss function + return torch.sum(loss) / torch.sum(current_weight) + + +@register_decomposition(aten.nll_loss) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("input",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def nll_loss( + input: TensorLikeType, + target: TensorLikeType, + weight: Optional[TensorLikeType] = None, + size_average: Optional[bool] = None, + ignore_index: int = -100, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.nll_loss + """ + torch._check( + input.ndim > 0, + lambda: f"Expected input tensor to have 1 or more dimensions (got {input.ndim})", + ) + + # TODO: raise exception instead of converting value + # msg = "size_average and reduce args are deprecated, please use reduction argument." + # Convert these options for consistency with the eager mode + if size_average is not None or reduce is not None: + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + + # The expected behavior when the target and input have zero elements: + # reduction = 'none' --- tensor([]) + # reduction = 'sum' --- tensor(0.) + # reduction = 'mean' --- tensor(nan) + # Mean reduction on empty tensors produces NaN. See the discussion in + # https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162 + if input.numel() == 0 and target.numel() == 0: + if reduction == "none": + return torch.zeros_like(target) + elif reduction == "sum": + return torch.empty_like(target) + else: + return torch.full_like(target, float("nan")) + + # The _nll_loss_nd helper function handles the most common cases. + # ndim == 1 (Single Example) + # => Batch Size: 1, Input: (C), Target: () + # ndim == 2 (k = 1) + # => Batch Size: N, Input: (N, C), Target: (N) + # ndim == 3 (k > 1) + # => Batch Size: N, Input: (N, C, K), Target: (N, K) + if input.ndim <= 3: + return _nll_loss_nd(input, target, weight, reduction, ignore_index) + + # For ndim > 3, we reshape the input and target to 3-D case. 
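+    # Hedged shape illustration (sizes are hypothetical): an input of shape
+    # (N=2, C=10, H=4, W=5) with target (2, 4, 5) is viewed below as (2, 10, 20)
+    # and (2, 20); for reduction="none" the result is reshaped back to (2, 4, 5).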
+ # Input (N batch-size, C classes, k-dimensions) + # Target (N batch-size, k-dimensions) + torch._check( + input.ndim > 0 and target.ndim > 0 and target.shape[1:] == input.shape[2:], + lambda: ( + "Expected input and target to both have ndim > 0 and " + "target.shape[1:] == input.shape[2:], but got " + f"target.shape {target.shape} and input.shape {input.shape}" + ), + ) + + batch_size = input.shape[0] + num_classes = input.shape[1] + out_size = [batch_size] + list(target.shape[1:]) + + input = torch.reshape(input, [batch_size, num_classes, -1]) + target = torch.reshape(target, [batch_size, -1]) + if reduction != "none": + return _nll_loss_nd(input, target, weight, reduction, ignore_index) + else: + result = _nll_loss_nd(input, target, weight, reduction, ignore_index) + # reshape flattened inner-dim to original k-dimensions + return torch.reshape(result, out_size) + + +# TODO: This ref supports int reduction and out kwarg to be compatible with ATen: +# https://github.com/pytorch/pytorch/issues/83931 +# TODO: Could be rewritten to support complex: +# https://github.com/pytorch/pytorch/pull/85041 +@register_decomposition(aten.huber_loss) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def huber_loss( + input: TensorLikeType, + target: TensorLikeType, + reduction: Union[str, int] = "mean", + delta: float = 1.0, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.huber_loss + """ + if type(reduction) is int: + reduction = _reduction_int_to_str(reduction) + _check_reduction_value(reduction) # type: ignore[arg-type] + torch._check( + delta > 0, + lambda: "huber_loss does not support non-positive values for delta.", + ) + z = (input - target).abs() + loss = torch.where(z < delta, 0.5 * z * z, delta * (z - 0.5 * delta)) + return _apply_loss_reduction(loss, reduction) # type: ignore[arg-type] + + +# tanhshrink does not use _make_elementwise_unary_reference because it does not support out +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def tanhshrink(a: TensorLikeType) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.tanhshrink + """ + if not isinstance(a, TensorLike): + raise RuntimeError( + "Expected a tensor input for an elementwise unary operation!" 
+ ) + return a - torch.tanh(a) + + +@register_decomposition(aten.threshold) +@_inplace_wrapper +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def threshold( + a: TensorLikeType, + threshold: NumberType, + value: Union[bool, int, float], + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.threshold + """ + + if inplace: + raise NotImplementedError + + return torch.where(a <= threshold, value, a) + + +# CompositeImplicitAutograd - don't register decomp +# No elementwise type promotion - core op doesn't explicitly type promote +def triplet_margin_loss( + anchor: TensorLikeType, + positive: TensorLikeType, + negative: TensorLikeType, + margin: float = 1.0, + p: float = 2, + eps: float = 1e-6, + swap: bool = False, + size_average: Optional[bool] = None, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." + reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + + # torch.nn.functional.triplet_margin_with_distance_loss has no ref defined + # since it's a pure Python implementation. Use this helper instead. + return _triplet_margin_with_distance_loss( + anchor=anchor, + positive=positive, + negative=negative, + distance_function=lambda x, y: torch.pairwise_distance(x, y, p, eps), + margin=margin, + swap=swap, + reduction=reduction, + ) + + +# Pure Python impl - don't register decomp and don't add a ref. Defined as a +# helper here since triplet_margin_loss can be nicely implemented with it. +def _triplet_margin_with_distance_loss( + anchor: TensorLikeType, + positive: TensorLikeType, + negative: TensorLikeType, + *, + distance_function: Optional[ + Callable[[TensorLikeType, TensorLikeType], TensorLikeType] + ] = None, + margin: float = 1.0, + swap: bool = False, + reduction: str = "mean", +) -> TensorLikeType: + _check_reduction_value(reduction) + + a_dim = anchor.ndim + p_dim = positive.ndim + n_dim = negative.ndim + torch._check( + a_dim == p_dim and p_dim == n_dim, + lambda: ( + f"The anchor, positive, and negative tensors are expected to have " + f"the same number of dimensions, but got: anchor {a_dim}D, " + f"positive {p_dim}D, and negative {n_dim}D inputs" + ), + ) + + if distance_function is None: + distance_function = torch.pairwise_distance + + dist_pos = distance_function(anchor, positive) + dist_neg = distance_function(anchor, negative) + # The distance swap is described in the paper "Learning shallow + # convolutional feature descriptors with triplet losses" by V. Balntas, E. + # Riba et al. If True, and if the positive example is closer to the + # negative example than the anchor is, swaps the positive example and the + # anchor in the loss computation. 
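+    # Hedged numeric illustration (values are made up): with margin=1.0,
+    # d(anchor, positive)=0.9, d(anchor, negative)=1.0 and d(positive, negative)=0.4,
+    # swap=True uses min(1.0, 0.4)=0.4 as the negative distance, giving
+    # clamp_min(1.0 + 0.9 - 0.4, 0) = 1.5 instead of clamp_min(1.0 + 0.9 - 1.0, 0) = 0.9.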
+ if swap: + dist_swap = distance_function(positive, negative) + dist_neg = torch.minimum(dist_neg, dist_swap) + loss = torch.clamp_min(margin + dist_pos - dist_neg, 0) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.hardtanh) +@_inplace_wrapper +@out_wrapper() +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def hardtanh( + a: TensorLikeType, + min_val: NumberType = -1, + max_val: NumberType = 1, + inplace: bool = False, +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.hardtanh + """ + if inplace: + raise NotImplementedError + if utils.is_boolean_dtype(a.dtype): + raise RuntimeError("Bool inputs not supported for hardtanh") + + # preserve legacy behavior of boundaries not causing type promotion + if utils.is_integer_dtype(a.dtype): + min_val = int(min_val) # type: ignore[arg-type] + max_val = int(max_val) # type: ignore[arg-type] + if not (a.dtype != torch.uint8 or (min_val >= 0 and max_val >= 0)): + raise RuntimeError( + "Cannot do hardtanh on an unsigned type with negative limits" + ) + return torch.clamp(a, min_val, max_val) # type: ignore[arg-type] + + +@register_decomposition(aten.gelu) +@out_wrapper() +@elementwise_unary_scalar_wrapper +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def gelu(a: TensorLikeType, approximate: str = "none") -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.gelu + """ + if not isinstance(a, TensorLike): + raise RuntimeError( + "Expected a tensor input for an elementwise unary operation!" + ) + M_SQRT2 = 1.41421356237309504880 + M_SQRT1_2 = 0.70710678118654752440 + M_2_SQRTPI = 1.12837916709551257390 + if approximate == "tanh": + kBeta = M_SQRT2 * M_2_SQRTPI * 0.5 + kKappa = 0.044715 + a_cube = a * a * a + inner = kBeta * (a + kKappa * a_cube) + return 0.5 * a * (1 + torch.tanh(inner)) + elif approximate == "none": + kAlpha = M_SQRT1_2 + return a * 0.5 * (1 + torch.erf(a * kAlpha)) + else: + raise RuntimeError("approximate argument must be either none or tanh.") + + +# CompositeImplicitAutograd - don't register decomp +@elementwise_type_promotion_wrapper( + type_promoting_args=("input", "target"), + type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT, +) +def poisson_nll_loss( + input: TensorLikeType, + target: TensorLikeType, + log_input: bool = True, + full: bool = False, + size_average: Optional[bool] = None, + eps: float = 1e-8, + reduce: Optional[bool] = None, + reduction: str = "mean", +) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.poisson_nll_loss + """ + if size_average is not None or reduce is not None: + # TODO: Raise exception instead of converting value. This is only for + # primTorch since it can drop support for deprecated arguments. + # msg = "size_average and reduce args are deprecated, please use reduction argument." 
+ reduction = _get_string_reduction_arg(size_average=size_average, reduce=reduce) + _check_reduction_value(reduction) + if log_input: + loss = torch.exp(input) - target * input + else: + loss = input - target * torch.log(input + eps) + + if full: + stirling_term = ( + target * torch.log(target) - target + 0.5 * torch.log(2 * torch.pi * target) + ) + # avoid inplace add + loss = loss + stirling_term.masked_fill(target <= 1, 0) + return _apply_loss_reduction(loss, reduction) + + +@register_decomposition(aten.prelu) +@elementwise_type_promotion_wrapper( + type_promoting_args=("a", "weight"), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def prelu(a: TensorLikeType, weight: TensorLikeType) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.prelu + """ + torch._check( + isinstance(a, TensorLike), + lambda: f"prelu: Expected `a` to be tensor, but got: {type(a)}", + ) + torch._check( + isinstance(weight, TensorLike), + lambda: f"prelu: Expected `weight` to be tensor, but got: {type(weight)}", + ) + + if weight.numel() != 1: + torch._check(a.ndim > 0, lambda: "Not allow zero-dim input tensor.") + channel_size = a.shape[1] if a.ndim >= 2 else 1 + torch._check( + weight.numel() == channel_size, + lambda: f"Mismatch of parameter numbers and input channel size. Found parameter numbers =" + f" {weight.numel()} and channel size = {channel_size}.", + ) + + torch._check( + weight.ndim == 0 or weight.ndim == 1, + lambda: f"prelu: Expected `weight` to be a scalar or 1D tensor, but got: " + f"ndim = {weight.ndim}", + ) + if a.ndim == 0: + weight = weight[0] if weight.ndim == 1 else weight + else: + weight = prims.broadcast_in_dim( + weight, a.shape, tuple() if weight.ndim == 0 else (0 if a.ndim == 1 else 1,) + ) + + return torch.where(a > 0, a, a * weight) + + +@register_decomposition(aten.relu6) +@_inplace_wrapper +@out_wrapper() +def relu6(a: TensorLikeType, inplace: bool = False) -> TensorLikeType: + """ + Reference implementation of torch.nn.functional.relu6 + """ + if inplace: + raise NotImplementedError + + # See https://github.com/pytorch/pytorch/pull/81142#discussion_r918220126 + # It may be better to use clamp here, but we use hardtanh to replicate + # the behavior of the existing implementation + return torch.nn.functional.hardtanh(a, 0, 6) + + +@register_decomposition(aten.glu) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def glu(a: TensorLikeType, dim: int = -1) -> TensorLikeType: + dim = utils.canonicalize_dims(a.ndim, dim) + torch._check( + a.shape[dim] % 2 == 0, + lambda: f"Halving dimension must be even, but dimension {dim} is size {a.shape[dim]}", + ) + b, c = torch.tensor_split(a, 2, dim) + + return b * torch.sigmoid(c) + + +@register_decomposition(aten.pairwise_distance) +@out_wrapper() +def pairwise_distance( + x1: TensorLikeType, + x2: TensorLikeType, + p: NumberType = 2.0, + eps: NumberType = 1e-6, + keepdim=False, +) -> TensorLikeType: + return torch.linalg.vector_norm(x1 - x2 + eps, ord=p, dim=-1, keepdim=keepdim) + + +@register_decomposition(aten.pdist) +@out_wrapper() +@elementwise_type_promotion_wrapper( + type_promoting_args=("a",), + type_promotion_kind=utils.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT, +) +def pdist(a: TensorLikeType, p: float = 2) -> TensorLikeType: + torch._check(a.ndim == 2, lambda: f"pdist only supports 2D tensors, got: {a.ndim}D") + torch._check(p >= 0, lambda: "pdist only supports non-negative p 
values") + # For p == 2 we can use an efficient implementation, but other values of p + # require creating a much bigger tensor for an intermediate step + if p == 2: + aTa = torch.mm(a, a.T) + aTa_diag = torch.diag(aTa) + t = torch.sqrt(torch.clamp(aTa_diag + aTa_diag.unsqueeze(-1) - 2 * aTa, min=0)) + else: + t = torch.linalg.vector_norm(a.unsqueeze(1) - a, ord=p, dim=2) + i = torch.triu_indices(t.shape[0], t.shape[1], offset=1, device=a.device) + return t.flatten().index_select(0, i[0] * t.shape[0] + i[1]) + + +@register_decomposition(aten.pixel_shuffle) +@out_wrapper() +def pixel_shuffle(self: Tensor, upscale_factor: int): + torch._check( + self.dim() >= 3, + lambda: f"pixel_shuffle expects input to have at least 3 dimensions, but got input with {self.dim} dimension(s)", + ) + batch = self.shape[:-3] + C_out = self.shape[-3] // upscale_factor**2 + HW_out = (self.shape[-2] * upscale_factor, self.shape[-1] * upscale_factor) + n = len(batch) + B_dims = range(n) + C_dim, r1_dim, r2_dim, H_dim, W_dim = range(n, n + 5) + return ( + self.view( + *batch, + C_out, + upscale_factor, + upscale_factor, + self.shape[-2], + self.shape[-1], + ) + .permute(*B_dims, C_dim, H_dim, r1_dim, W_dim, r2_dim) + .reshape(*batch, C_out, *HW_out) + .clone(memory_format=utils.suggest_memory_format(self)) + ) + + +@register_decomposition(aten.pixel_unshuffle) +@out_wrapper() +def pixel_unshuffle(self: Tensor, downscale_factor: int): + torch._check( + self.dim() >= 3, + lambda: f"pixel_unshuffle expects input to have at least 3 dimensions, but got input with {self.dim} dimension(s)", + ) + batch = self.shape[:-3] + C_out = self.shape[-3] * downscale_factor**2 + HW_out = (self.shape[-2] // downscale_factor, self.shape[-1] // downscale_factor) + n = len(batch) + B_dims = range(n) + C_dim, H_dim, r1_dim, W_dim, r2_dim = range(n, n + 5) + return ( + self.view( + *batch, + self.shape[-3], + HW_out[0], + downscale_factor, + HW_out[1], + downscale_factor, + ) + .permute(*B_dims, C_dim, r1_dim, r2_dim, H_dim, W_dim) + .reshape(*batch, C_out, *HW_out) + .clone(memory_format=utils.suggest_memory_format(self)) + ) + + +# Needed as aten.{celu_,elu_...} exist (even if they don't have the in-place kwarg) +celu_ = _make_inplace(celu) +elu_ = _make_inplace(elu) +mish_ = _make_inplace(mish) +selu_ = _make_inplace(selu) +threshold_ = _make_inplace(threshold) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eab2640d29591c6d5434735103ecb00c1508d47 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/_refs/nn/functional/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/amp/__init__.py b/llmeval-env/lib/python3.10/site-packages/torch/amp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e0be696975584bbc0460e513eea0dd0d73736ea8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/amp/__init__.py @@ -0,0 +1,2 @@ +from .autocast_mode import _enter_autocast, _exit_autocast, autocast +from .grad_scaler import GradScaler diff --git a/llmeval-env/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..c3efc42f5cfbb44cd81a009aef22b06934d1209b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/amp/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35c6c7acc07e4507630638d5284924cb117f9682 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/amp/__pycache__/autocast_mode.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/amp/autocast_mode.py b/llmeval-env/lib/python3.10/site-packages/torch/amp/autocast_mode.py new file mode 100644 index 0000000000000000000000000000000000000000..30c6aefcf1bdaf24943b408694c59971a8033ca6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/amp/autocast_mode.py @@ -0,0 +1,436 @@ +import functools +import warnings + +from typing import Any, Optional + +import torch +from torch.types import _dtype + +__all__ = ["autocast_decorator", "autocast"] + + +def autocast_decorator(autocast_instance, func): + @functools.wraps(func) + def decorate_autocast(*args, **kwargs): + with autocast_instance: + return func(*args, **kwargs) + + decorate_autocast.__script_unsupported = "@autocast() decorator is not supported in script mode" # type: ignore[attr-defined] + return decorate_autocast + + +class autocast: + r""" + Instances of :class:`autocast` serve as context managers or decorators that + allow regions of your script to run in mixed precision. + + In these regions, ops run in an op-specific dtype chosen by autocast + to improve performance while maintaining accuracy. + See the :ref:`Autocast Op Reference` for details. + + When entering an autocast-enabled region, Tensors may be any type. + You should not call ``half()`` or ``bfloat16()`` on your model(s) or inputs when using autocasting. + + :class:`autocast` should wrap only the forward pass(es) of your network, including the loss + computation(s). Backward passes under autocast are not recommended. + Backward ops run in the same type that autocast used for corresponding forward ops. + + Example for CUDA Devices:: + + # Creates model and optimizer in default precision + model = Net().cuda() + optimizer = optim.SGD(model.parameters(), ...) + + for input, target in data: + optimizer.zero_grad() + + # Enables autocasting for the forward pass (model + loss) + with torch.autocast(device_type="cuda"): + output = model(input) + loss = loss_fn(output, target) + + # Exits the context manager before backward() + loss.backward() + optimizer.step() + + See the :ref:`CUDA Automatic Mixed Precision examples` for usage (along with gradient scaling) + in more complex scenarios (e.g., gradient penalty, multiple models/losses, custom autograd functions). + + :class:`autocast` can also be used as a decorator, e.g., on the ``forward`` method of your model:: + + class AutocastModel(nn.Module): + ... + @torch.autocast(device_type="cuda") + def forward(self, input): + ... + + Floating-point Tensors produced in an autocast-enabled region may be ``float16``. + After returning to an autocast-disabled region, using them with floating-point + Tensors of different dtypes may cause type mismatch errors. If so, cast the Tensor(s) + produced in the autocast region back to ``float32`` (or other dtype if desired). 
+ If a Tensor from the autocast region is already ``float32``, the cast is a no-op, + and incurs no additional overhead. + CUDA Example:: + + # Creates some tensors in default dtype (here assumed to be float32) + a_float32 = torch.rand((8, 8), device="cuda") + b_float32 = torch.rand((8, 8), device="cuda") + c_float32 = torch.rand((8, 8), device="cuda") + d_float32 = torch.rand((8, 8), device="cuda") + + with torch.autocast(device_type="cuda"): + # torch.mm is on autocast's list of ops that should run in float16. + # Inputs are float32, but the op runs in float16 and produces float16 output. + # No manual casts are required. + e_float16 = torch.mm(a_float32, b_float32) + # Also handles mixed input types + f_float16 = torch.mm(d_float32, e_float16) + + # After exiting autocast, calls f_float16.float() to use with d_float32 + g_float32 = torch.mm(d_float32, f_float16.float()) + + CPU Training Example:: + + # Creates model and optimizer in default precision + model = Net() + optimizer = optim.SGD(model.parameters(), ...) + + for epoch in epochs: + for input, target in data: + optimizer.zero_grad() + + # Runs the forward pass with autocasting. + with torch.autocast(device_type="cpu", dtype=torch.bfloat16): + output = model(input) + loss = loss_fn(output, target) + + loss.backward() + optimizer.step() + + + CPU Inference Example:: + + # Creates model in default precision + model = Net().eval() + + with torch.autocast(device_type="cpu", dtype=torch.bfloat16): + for input in data: + # Runs the forward pass with autocasting. + output = model(input) + + CPU Inference Example with Jit Trace:: + + class TestModel(nn.Module): + def __init__(self, input_size, num_classes): + super().__init__() + self.fc1 = nn.Linear(input_size, num_classes) + def forward(self, x): + return self.fc1(x) + + input_size = 2 + num_classes = 2 + model = TestModel(input_size, num_classes).eval() + + # For now, we suggest to disable the Jit Autocast Pass, + # As the issue: https://github.com/pytorch/pytorch/issues/75956 + torch._C._jit_set_autocast_mode(False) + + with torch.cpu.amp.autocast(cache_enabled=False): + model = torch.jit.trace(model, torch.randn(1, input_size)) + model = torch.jit.freeze(model) + # Models Run + for _ in range(3): + model(torch.randn(1, input_size)) + + Type mismatch errors *in* an autocast-enabled region are a bug; if this is what you observe, + please file an issue. + + ``autocast(enabled=False)`` subregions can be nested in autocast-enabled regions. + Locally disabling autocast can be useful, for example, if you want to force a subregion + to run in a particular ``dtype``. Disabling autocast gives you explicit control over + the execution type. In the subregion, inputs from the surrounding region + should be cast to ``dtype`` before use:: + + # Creates some tensors in default dtype (here assumed to be float32) + a_float32 = torch.rand((8, 8), device="cuda") + b_float32 = torch.rand((8, 8), device="cuda") + c_float32 = torch.rand((8, 8), device="cuda") + d_float32 = torch.rand((8, 8), device="cuda") + + with torch.autocast(device_type="cuda"): + e_float16 = torch.mm(a_float32, b_float32) + with torch.autocast(device_type="cuda", enabled=False): + # Calls e_float16.float() to ensure float32 execution + # (necessary because e_float16 was created in an autocasted region) + f_float32 = torch.mm(c_float32, e_float16.float()) + + # No manual casts are required when re-entering the autocast-enabled region. + # torch.mm again runs in float16 and produces float16 output, regardless of input types. 
+ g_float16 = torch.mm(d_float32, f_float32) + + The autocast state is thread-local. If you want it enabled in a new thread, the context manager or decorator + must be invoked in that thread. This affects :class:`torch.nn.DataParallel` and + :class:`torch.nn.parallel.DistributedDataParallel` when used with more than one GPU per process + (see :ref:`Working with Multiple GPUs`). + + Args: + device_type(str, required): Device type to use. Possible values are: 'cuda', 'cpu', 'xpu' and 'hpu'. + The type is the same as the `type` attribute of a :class:`torch.device`. + Thus, you may obtain the device type of a tensor using `Tensor.device.type`. + enabled(bool, optional): Whether autocasting should be enabled in the region. + Default: ``True`` + dtype(torch_dtype, optional): Whether to use torch.float16 or torch.bfloat16. + cache_enabled(bool, optional): Whether the weight cache inside autocast should be enabled. + Default: ``True`` + """ + + def __init__( + self, + device_type: str, + dtype: Optional[_dtype] = None, + enabled: bool = True, + cache_enabled: Optional[bool] = None, + ): + if torch._jit_internal.is_scripting(): + self._enabled = enabled + self.device = device_type + self.fast_dtype = dtype + # TODO: support get_autocast_gpu/cpu_dtype + assert dtype is not None + return + self.device = device_type + self.custom_backend_name = torch._C._get_privateuse1_backend_name() + if self.device == "cuda": + self.fast_dtype = torch.get_autocast_gpu_dtype() + elif self.device == "cpu": + self.fast_dtype = torch.get_autocast_cpu_dtype() + elif self.device == "xpu": + self.fast_dtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined] + elif self.device == "ipu": + self.fast_dtype = torch.get_autocast_ipu_dtype() # type: ignore[attr-defined] + elif self.device == "hpu": + self.fast_dtype = torch.hpu.get_autocast_hpu_dtype() # type: ignore[attr-defined] + elif self.device == "xla": + self.fast_dtype = torch.get_autocast_xla_dtype() # type: ignore[attr-defined] + elif self.device == self.custom_backend_name: + necessary_funcs = [ + "is_autocast_enabled", + "set_autocast_enabled", + "get_autocast_dtype", + "set_autocast_dtype", + "get_amp_supported_dtype", + ] + message = f"Tried to use AMP with the `{self.custom_backend_name}` backend, but the backend has not " + message += "registered a module or the module miss some necessary funcs. The backend should register " + message += "a module by `torch._register_device_module`, and the module must have these funcs: \n" + message += "`is_autocast_enabled() -> bool`, `set_autocast_enabled(bool) -> None`, " + message += "`get_autocast_dtype() -> torch.dtype`, `set_autocast_dtype(torch.dtype) " + message += ( + "-> None` and `get_amp_supported_dtype() -> List[torch.dtype]`. \n" + ) + + assert hasattr(torch, self.custom_backend_name), message + self.custom_device_mod = getattr(torch, self.custom_backend_name) + for func in necessary_funcs: + assert hasattr(self.custom_device_mod, func), ( + message + f"But the func `{func}` is missing. \n" + ) + + self.fast_dtype = self.custom_device_mod.get_autocast_dtype() + else: + raise RuntimeError( + f"User specified an unsupported autocast device_type '{self.device}'" + ) + self._cache_enabled = torch.is_autocast_cache_enabled() + if ( + enabled + and torch.cuda.amp.common.amp_definitely_not_available() + and self.device == "cuda" + ): + warnings.warn( + "User provided device_type of 'cuda', but CUDA is not available. 
Disabling" + ) + enabled = False + if dtype is not None: + self.fast_dtype = dtype + if cache_enabled is not None: + self._cache_enabled = cache_enabled + + if self.device == "cpu": + supported_dtype = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtype and enabled: + error_message = "In CPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "CPU Autocast only supports dtype of " + error_message += ( + ", ".join(str(dtype) for dtype in supported_dtype) + " currently." + ) + warnings.warn(error_message) + enabled = False + elif self.device == "xpu": + supported_dtype = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtype: + error_message = "In XPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "XPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." + warnings.warn(error_message) + enabled = False + elif self.device == "ipu": + supported_dtypes = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtypes: + error_message = "In IPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "IPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." + warnings.warn(error_message) + enabled = False + elif self.device == "hpu": + supported_dtype = [torch.bfloat16, torch.float16] + if self.fast_dtype not in supported_dtype: + error_message = "In HPU autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += "HPU Autocast only supports dtypes of torch.bfloat16 and torch.float16 currently." + warnings.warn(error_message) + enabled = False + elif self.device == self.custom_backend_name: + supported_dtype = self.custom_device_mod.get_amp_supported_dtype() + if self.fast_dtype not in supported_dtype: + error_message = f"In {self.custom_backend_name} autocast, but the target dtype is not supported. " + error_message += f"Disabling autocast.\n {self.custom_backend_name} Autocast only supports dtypes of " + error_message += ( + ", ".join(str(dtype) for dtype in supported_dtype) + " currently." + ) + warnings.warn(error_message) + enabled = False + elif self.device == "cuda": + if ( + enabled + and self.fast_dtype == torch.bfloat16 + and not torch.cuda.is_bf16_supported() + ): + raise RuntimeError( + "Current CUDA Device does not support bfloat16. Please switch dtype to float16." + ) + elif self.device == "xla": + supported_dtype = [torch.float16, torch.bfloat16] + if self.fast_dtype not in supported_dtype: + error_message = "In XLA autocast, but the target dtype is not supported. Disabling autocast.\n" + error_message += ( + "XLA Autocast only supports dtype of torch.bfloat16 currently." 
+ ) + warnings.warn(error_message) + enabled = False + self._enabled = enabled + + def __enter__(self): + if torch._jit_internal.is_scripting(): + assert self.fast_dtype is not None + return self + + self.prev_cache_enabled = torch.is_autocast_cache_enabled() + if self.device == "cpu": + self.prev = torch.is_autocast_cpu_enabled() + self.prev_fastdtype = torch.get_autocast_cpu_dtype() + torch.set_autocast_cpu_enabled(self._enabled) + torch.set_autocast_cpu_dtype(self.fast_dtype) # type: ignore[arg-type] + torch.autocast_increment_nesting() + elif self.device == "xpu": + self.prev = torch.xpu.is_autocast_xpu_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.xpu.get_autocast_xpu_dtype() # type: ignore[attr-defined] + torch.xpu.set_autocast_xpu_enabled(self._enabled) # type: ignore[attr-defined] + torch.xpu.set_autocast_xpu_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == "ipu": + self.prev = torch.is_autocast_ipu_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.get_autocast_ipu_dtype() # type: ignore[attr-defined] + torch.set_autocast_ipu_enabled(self._enabled) # type: ignore[attr-defined] + torch.set_autocast_ipu_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == "hpu": + self.prev = torch.hpu.is_autocast_hpu_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.hpu.get_autocast_hpu_dtype() # type: ignore[attr-defined] + torch.hpu.set_autocast_hpu_enabled(self._enabled) # type: ignore[attr-defined] + torch.hpu.set_autocast_hpu_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == "xla": + self.prev = torch.is_autocast_xla_enabled() # type: ignore[attr-defined] + self.prev_fastdtype = torch.get_autocast_xla_dtype() # type: ignore[attr-defined] + torch.set_autocast_xla_enabled(self._enabled) # type: ignore[attr-defined] + torch.set_autocast_xla_dtype(self.fast_dtype) # type: ignore[attr-defined] + torch.autocast_increment_nesting() + elif self.device == self.custom_backend_name: + self.prev = self.custom_device_mod.is_autocast_enabled() + self.prev_fastdtype = self.custom_device_mod.get_autocast_dtype() + self.custom_device_mod.set_autocast_enabled(self._enabled) + self.custom_device_mod.set_autocast_dtype(self.fast_dtype) + torch.autocast_increment_nesting() + else: + self.prev = torch.is_autocast_enabled() + self.prev_fastdtype = torch.get_autocast_gpu_dtype() + torch.set_autocast_gpu_dtype(self.fast_dtype) # type: ignore[arg-type] + torch.set_autocast_enabled(self._enabled) + torch.autocast_increment_nesting() + torch.set_autocast_cache_enabled(self._cache_enabled) + + def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): # type: ignore[override] + if torch._jit_internal.is_scripting(): + return + + # Drop the cache when we exit to a nesting level that's outside any instance of autocast. 
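+        # Hedged illustration of the nesting rule used below: given, e.g.,
+        #   with autocast("cuda"):
+        #       with autocast("cuda", enabled=False):
+        #           ...
+        # autocast_decrement_nesting() only returns 0 when the outermost context
+        # exits, so the autocast cache is cleared exactly once, at that point.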
+ if self.device == "cpu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_cpu_enabled(self.prev) + torch.set_autocast_cpu_dtype(self.prev_fastdtype) + elif self.device == "xpu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.xpu.set_autocast_xpu_enabled(self.prev) # type: ignore[attr-defined] + torch.xpu.set_autocast_xpu_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == "ipu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_ipu_enabled(self.prev) # type: ignore[attr-defined] + torch.set_autocast_ipu_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == "hpu": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.hpu.set_autocast_hpu_enabled(self.prev) # type: ignore[attr-defined] + torch.hpu.set_autocast_hpu_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == "xla": + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_xla_enabled(self.prev) # type: ignore[attr-defined] + torch.set_autocast_xla_dtype(self.prev_fastdtype) # type: ignore[attr-defined] + elif self.device == self.custom_backend_name: + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + self.custom_device_mod.set_autocast_enabled(self.prev) + self.custom_device_mod.set_autocast_dtype(self.prev_fastdtype) + else: + if torch.autocast_decrement_nesting() == 0: + torch.clear_autocast_cache() + torch.set_autocast_enabled(self.prev) + torch.set_autocast_gpu_dtype(self.prev_fastdtype) + torch.set_autocast_cache_enabled(self.prev_cache_enabled) + return False + + def __call__(self, func): + if torch._jit_internal.is_scripting(): + return func + return autocast_decorator(self, func) + + +# These functions aren't meant for public usage. +# They are what we trace into a graph during pre_dispatch tracing +# when we encounter an autocast context manager. +def _enter_autocast(*vals): + # For pre-dispatch tracing, if a TorchFunction mode is active, we'll want to trace this into a graph. + if torch._C._is_torch_function_mode_enabled(): + return torch.overrides.handle_torch_function( + torch.amp._enter_autocast, [], *vals + ) + mode = torch.amp.autocast(*vals) + mode.__enter__() + return mode + + +def _exit_autocast(mode): + if torch._C._is_torch_function_mode_enabled(): + return torch.overrides.handle_torch_function(torch.amp._exit_autocast, [], mode) + mode.__exit__(None, None, None) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/amp/grad_scaler.py b/llmeval-env/lib/python3.10/site-packages/torch/amp/grad_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..140ec67162f83146d737beae6bb6be560a705a31 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/amp/grad_scaler.py @@ -0,0 +1,681 @@ +from __future__ import annotations + +import inspect +import warnings +from collections import abc, defaultdict +from enum import Enum +from typing import Any, cast, Dict, Iterable, List, Optional, overload, Tuple, Union + +import torch + + +__all__ = ["OptState", "GradScaler"] + + +class _MultiDeviceReplicator: + """Lazily serves copies of a tensor to requested devices. + + Copies are cached per-device. 
+ """ + + def __init__(self, master_tensor: torch.Tensor) -> None: + self.master = master_tensor + self._per_device_tensors: Dict[torch.device, torch.Tensor] = {} + + def get(self, device: torch.device) -> torch.Tensor: + retval = self._per_device_tensors.get(device, None) + if retval is None: + retval = self.master.to(device=device, non_blocking=True, copy=True) + self._per_device_tensors[device] = retval + return retval + + +# Defines default_factory for GradScaler's _per_optimizer_states defaultdict, +# as well as associated "enum" values. Prefers defining these at top level because +# - Lambdas can't be pickled, so we don't want to supply a lambda as the factory. +# - Defining READY, UNSCALED, STEPPED and _refresh_per_optimizer_state within GradScaler +# causes a circular reference, which we'd rather avoid. +class OptState(Enum): + READY = 0 + UNSCALED = 1 + STEPPED = 2 + + +def _refresh_per_optimizer_state() -> Dict[str, Any]: + return {"stage": OptState.READY, "found_inf_per_device": {}} + + +class GradScaler: + """An instance ``scaler`` of :class:`GradScaler`. + + Helps perform the steps of gradient scaling + conveniently. + + * ``scaler.scale(loss)`` multiplies a given loss by ``scaler``'s current scale factor. + * ``scaler.step(optimizer)`` safely unscales gradients and calls ``optimizer.step()``. + * ``scaler.update()`` updates ``scaler``'s scale factor. + + Example:: + + # Creates a GradScaler once at the beginning of training. + scaler = GradScaler() + + for epoch in epochs: + for input, target in data: + optimizer.zero_grad() + output = model(input) + loss = loss_fn(output, target) + + # Scales loss. Calls backward() on scaled loss to create scaled gradients. + scaler.scale(loss).backward() + + # scaler.step() first unscales gradients of the optimizer's params. + # If gradients don't contain infs/NaNs, optimizer.step() is then called, + # otherwise, optimizer.step() is skipped. + scaler.step(optimizer) + + # Updates the scale for next iteration. + scaler.update() + + See the :ref:`Automatic Mixed Precision examples` for usage + (along with autocasting) in more complex cases like gradient clipping, gradient accumulation, gradient penalty, + and multiple losses/optimizers. + + ``scaler`` dynamically estimates the scale factor each iteration. To minimize gradient underflow, + a large scale factor should be used. However, ``float16`` values can "overflow" (become inf or NaN) if + the scale factor is too large. Therefore, the optimal scale factor is the largest factor that can be used + without incurring inf or NaN gradient values. + ``scaler`` approximates the optimal scale factor over time by checking the gradients for infs and NaNs during every + ``scaler.step(optimizer)`` (or optional separate ``scaler.unscale_(optimizer)``, see :meth:`unscale_`). + + * If infs/NaNs are found, ``scaler.step(optimizer)`` skips the underlying ``optimizer.step()`` (so the params + themselves remain uncorrupted) and ``update()`` multiplies the scale by ``backoff_factor``. + + * If no infs/NaNs are found, ``scaler.step(optimizer)`` runs the underlying ``optimizer.step()`` as usual. + If ``growth_interval`` unskipped iterations occur consecutively, ``update()`` multiplies the scale by + ``growth_factor``. + + The scale factor often causes infs/NaNs to appear in gradients for the first few iterations as its + value calibrates. ``scaler.step`` will skip the underlying ``optimizer.step()`` for these + iterations. 
After that, step skipping should occur rarely (once every few hundred or thousand iterations). + + Args: + device (str, optional, default="cuda"): Device type to use. Possible values are: 'cuda' and 'cpu'. + The type is the same as the `type` attribute of a :class:`torch.device`. + Thus, you may obtain the device type of a tensor using `Tensor.device.type`. + init_scale (float, optional, default=2.**16): Initial scale factor. + growth_factor (float, optional, default=2.0): Factor by which the scale is multiplied during + :meth:`update` if no inf/NaN gradients occur for ``growth_interval`` consecutive iterations. + backoff_factor (float, optional, default=0.5): Factor by which the scale is multiplied during + :meth:`update` if inf/NaN gradients occur in an iteration. + growth_interval (int, optional, default=2000): Number of consecutive iterations without inf/NaN gradients + that must occur for the scale to be multiplied by ``growth_factor``. + enabled (bool, optional): If ``False``, disables gradient scaling. :meth:`step` simply + invokes the underlying ``optimizer.step()``, and other methods become no-ops. + Default: ``True`` + """ + + def __init__( + self, + device: str = "cuda", + init_scale: float = 2.0**16, + growth_factor: float = 2.0, + backoff_factor: float = 0.5, + growth_interval: int = 2000, + enabled: bool = True, + ) -> None: + self._device = device + self._enabled = enabled + if self._device == "cuda": + if enabled and torch.cuda.amp.common.amp_definitely_not_available(): + warnings.warn( + "torch.cuda.amp.GradScaler is enabled, but CUDA is not available. Disabling." + ) + self._enabled = False + + if self._enabled: + assert growth_factor > 1.0, "The growth factor must be > 1.0." + assert backoff_factor < 1.0, "The backoff factor must be < 1.0." + + self._init_scale = init_scale + # self._scale will be lazily initialized during the first call to scale() + self._scale: Optional[torch.Tensor] = None + self._growth_factor = growth_factor + self._backoff_factor = backoff_factor + self._growth_interval = growth_interval + self._init_growth_tracker = 0 + # self._growth_tracker will be lazily initialized during the first call to scale() + self._growth_tracker: Optional[torch.Tensor] = None + self._per_optimizer_states: Dict[int, Dict[str, Any]] = defaultdict( + _refresh_per_optimizer_state + ) + + def _check_scale_growth_tracker( + self, funcname: str + ) -> Tuple[torch.Tensor, torch.Tensor]: + fix = "This may indicate your script did not use scaler.scale(loss or outputs) earlier in the iteration." + assert self._scale is not None, ( + f"Attempted {funcname} but _scale is None. " + fix + ) + assert self._growth_tracker is not None, ( + f"Attempted {funcname} but _growth_tracker is None. " + fix + ) + return (self._scale, self._growth_tracker) + + def _lazy_init_scale_growth_tracker(self, dev: torch.device) -> None: + assert self._growth_tracker is None, "_growth_tracker initialized before _scale" + self._scale = torch.full((), self._init_scale, dtype=torch.float32, device=dev) + self._growth_tracker = torch.full( + (), self._init_growth_tracker, dtype=torch.int32, device=dev + ) + + @overload + def scale(self, outputs: torch.Tensor) -> torch.Tensor: + ... + + @overload + def scale(self, outputs: List[torch.Tensor]) -> List[torch.Tensor]: + ... + + @overload + def scale(self, outputs: Tuple[torch.Tensor, ...]) -> Tuple[torch.Tensor, ...]: + ... + + @overload + def scale(self, outputs: Iterable[torch.Tensor]) -> Iterable[torch.Tensor]: + ... 
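+    # Hedged illustration (tensor names are hypothetical): scale() preserves the
+    # container structure of its argument, e.g.
+    #   scaled = scaler.scale((loss_a, [loss_b, loss_c]))
+    # returns a tuple (scaled_a, [scaled_b, scaled_c]) with each tensor multiplied
+    # by the current scale, via the apply_scale() recursion defined below.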
+ + def scale( + self, + outputs: Union[torch.Tensor, Iterable[torch.Tensor]], + ) -> Union[torch.Tensor, Iterable[torch.Tensor]]: + """ + Multiplies ('scales') a tensor or list of tensors by the scale factor. + + Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned + unmodified. + + Args: + outputs (Tensor or iterable of Tensors): Outputs to scale. + """ + if not self._enabled: + return outputs + + # Short-circuit for the common case. + if isinstance(outputs, torch.Tensor): + if self._scale is None: + self._lazy_init_scale_growth_tracker(outputs.device) + assert self._scale is not None + return outputs * self._scale.to(device=outputs.device, non_blocking=True) + + # Invoke the more complex machinery only if we're treating multiple outputs. + stash: List[ + _MultiDeviceReplicator + ] = [] # holds a reference that can be overwritten by apply_scale + + def apply_scale(val: Union[torch.Tensor, Iterable[torch.Tensor]]): + if isinstance(val, torch.Tensor): + if len(stash) == 0: + if self._scale is None: + self._lazy_init_scale_growth_tracker(val.device) + assert self._scale is not None + stash.append(_MultiDeviceReplicator(self._scale)) + return val * stash[0].get(val.device) + if isinstance(val, abc.Iterable): + iterable = map(apply_scale, val) + if isinstance(val, (list, tuple)): + return type(val)(iterable) + return iterable + raise ValueError("outputs must be a Tensor or an iterable of Tensors") + + return apply_scale(outputs) + + def _unscale_grads_( + self, + optimizer: torch.optim.Optimizer, + inv_scale: torch.Tensor, + found_inf: torch.Tensor, + allow_fp16: bool, + ) -> Dict[torch.device, torch.Tensor]: + per_device_inv_scale = _MultiDeviceReplicator(inv_scale) + per_device_found_inf = _MultiDeviceReplicator(found_inf) + + # To set up _amp_foreach_non_finite_check_and_unscale_, split grads by device and dtype. + # There could be hundreds of grads, so we'd like to iterate through them just once. + # However, we don't know their devices or dtypes in advance. + + # https://stackoverflow.com/questions/5029934/defaultdict-of-defaultdict + # Google says mypy struggles with defaultdicts type annotations. + per_device_and_dtype_grads: Dict[ + torch.device, Dict[torch.dtype, List[torch.Tensor]] + ] = defaultdict(lambda: defaultdict(list)) + with torch.no_grad(): + for group in optimizer.param_groups: + for param in group["params"]: + assert isinstance(param, torch.Tensor) + if param.grad is None: + continue + if (not allow_fp16) and param.grad.dtype == torch.float16: + raise ValueError("Attempting to unscale FP16 gradients.") + if param.grad.is_sparse: + # is_coalesced() == False means the sparse grad has values with duplicate indices. + # coalesce() deduplicates indices and adds all values that have the same index. + # For scaled fp16 values, there's a good chance coalescing will cause overflow, + # so we should check the coalesced _values(). + if param.grad.dtype is torch.float16: + param.grad = param.grad.coalesce() + to_unscale = param.grad._values() + else: + to_unscale = param.grad + + # TODO: is there a way to split by device and dtype without appending in the inner loop? 
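+                    # Hedged illustration of the buckets this append builds
+                    # (grad names g0..g2 are hypothetical), e.g.:
+                    #   {device("cuda:0"): {torch.float16: [g0, g2], torch.float32: [g1]}}
+                    # so the foreach unscale kernel below runs once per (device, dtype) group.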
+ per_device_and_dtype_grads[to_unscale.device][ + to_unscale.dtype + ].append(to_unscale) + + for device, per_dtype_grads in per_device_and_dtype_grads.items(): + for grads in per_dtype_grads.values(): + torch._amp_foreach_non_finite_check_and_unscale_( + grads, + per_device_found_inf.get(device), + per_device_inv_scale.get(device), + ) + + return per_device_found_inf._per_device_tensors + + def unscale_(self, optimizer: torch.optim.Optimizer) -> None: + """ + Divides ("unscales") the optimizer's gradient tensors by the scale factor. + + :meth:`unscale_` is optional, serving cases where you need to + :ref:`modify or inspect gradients` + between the backward pass(es) and :meth:`step`. + If :meth:`unscale_` is not called explicitly, gradients will be unscaled automatically during :meth:`step`. + + Simple example, using :meth:`unscale_` to enable clipping of unscaled gradients:: + + ... + scaler.scale(loss).backward() + scaler.unscale_(optimizer) + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) + scaler.step(optimizer) + scaler.update() + + Args: + optimizer (torch.optim.Optimizer): Optimizer that owns the gradients to be unscaled. + + .. note:: + :meth:`unscale_` does not incur a CPU-GPU sync. + + .. warning:: + :meth:`unscale_` should only be called once per optimizer per :meth:`step` call, + and only after all gradients for that optimizer's assigned parameters have been accumulated. + Calling :meth:`unscale_` twice for a given optimizer between each :meth:`step` triggers a RuntimeError. + + .. warning:: + :meth:`unscale_` may unscale sparse gradients out of place, replacing the ``.grad`` attribute. + """ + if not self._enabled: + return + + self._check_scale_growth_tracker("unscale_") + + optimizer_state = self._per_optimizer_states[id(optimizer)] + + if optimizer_state["stage"] is OptState.UNSCALED: + raise RuntimeError( + "unscale_() has already been called on this optimizer since the last update()." + ) + elif optimizer_state["stage"] is OptState.STEPPED: + raise RuntimeError("unscale_() is being called after step().") + + # FP32 division can be imprecise for certain compile options, so we carry out the reciprocal in FP64. + assert self._scale is not None + inv_scale = self._scale.double().reciprocal().float() + found_inf = torch.full((), 0.0, dtype=torch.float32, device=self._scale.device) + + optimizer_state["found_inf_per_device"] = self._unscale_grads_( + optimizer, inv_scale, found_inf, False + ) + optimizer_state["stage"] = OptState.UNSCALED + + def _maybe_opt_step( + self, + optimizer: torch.optim.Optimizer, + optimizer_state: Dict[str, Any], + *args: Any, + **kwargs: Any, + ) -> Optional[float]: + retval: Optional[float] = None + if not sum(v.item() for v in optimizer_state["found_inf_per_device"].values()): + retval = optimizer.step(*args, **kwargs) + return retval + + def step( + self, optimizer: torch.optim.Optimizer, *args: Any, **kwargs: Any + ) -> Optional[float]: + """Invoke ``unscale_(optimizer)`` followed by parameter update, if gradients are not infs/NaN. + + :meth:`step` carries out the following two operations: + + 1. Internally invokes ``unscale_(optimizer)`` (unless :meth:`unscale_` was explicitly called for ``optimizer`` + earlier in the iteration). As part of the :meth:`unscale_`, gradients are checked for infs/NaNs. + 2. If no inf/NaN gradients are found, invokes ``optimizer.step()`` using the unscaled + gradients. Otherwise, ``optimizer.step()`` is skipped to avoid corrupting the params. 
+ + ``*args`` and ``**kwargs`` are forwarded to ``optimizer.step()``. + + Returns the return value of ``optimizer.step(*args, **kwargs)``. + + Args: + optimizer (torch.optim.Optimizer): Optimizer that applies the gradients. + args: Any arguments. + kwargs: Any keyword arguments. + + .. warning:: + Closure use is not currently supported. + """ + if not self._enabled: + return optimizer.step(*args, **kwargs) + + if "closure" in kwargs: + raise RuntimeError( + "Closure use is not currently supported if GradScaler is enabled." + ) + + self._check_scale_growth_tracker("step") + + optimizer_state = self._per_optimizer_states[id(optimizer)] + + if optimizer_state["stage"] is OptState.STEPPED: + raise RuntimeError( + "step() has already been called since the last update()." + ) + + retval: Optional[float] = None + + if getattr(optimizer, "_step_supports_amp_scaling", False): + # This optimizer has customized scale-handling logic, so we can call optimizer.step() directly. + # The contract with custom optimizers is that their step() should accept an additional, + # optional grad_scaler kwarg. We append self to the kwargs so the custom optimizer has full information: + # it can query its own state, invoke unscale_ on itself, etc + # The contract above is being deprecated to avoid introducing `grad_scaler: GradScaler` argument + # to `Optimizer.step`. The new behavior is going to add two Tensor attributes of `grad_scale` + # and `found_inf` to the passed optimizer so that the optimizer can utilize those + # to skip the parameter updates or unscale gradients before updating parameters in + # the fused kernel, e.g. `FusedAdamMathFunctor`. + # In this behavior, `GradScaler._check_inf_per_device` is called if `OptState.READY`, + # while the method is expected to be called by users side, i.e. their optimizers. + kwargs_ = kwargs + has_grad_scaler_kwarg = ( + "grad_scaler" in inspect.signature(optimizer.step).parameters + ) + if has_grad_scaler_kwarg: + warnings.warn( + "GradScaler is going to stop passing itself as a keyword argument to the passed " + "optimizer. In the near future GradScaler registers `grad_scale: Tensor` and " + "`found_inf: Tensor` to the passed optimizer and let the optimizer use them directly.", + FutureWarning, + ) + kwargs_.update({"grad_scaler": self}) + else: + if optimizer_state["stage"] is OptState.READY: + self._check_inf_per_device(optimizer) + scaler = self._get_scale_async() + assert scaler is not None + found_inf = cast( + torch.Tensor, + sum( + [ + t.to(scaler.device, non_blocking=True) + for t in optimizer_state["found_inf_per_device"].values() + ] + ), + ) + optimizer.grad_scale = ( # type: ignore[attr-defined] + None if optimizer_state["stage"] == OptState.UNSCALED else scaler + ) + optimizer.found_inf = found_inf # type: ignore[attr-defined] + retval = optimizer.step(*args, **kwargs_) + optimizer_state["stage"] = OptState.STEPPED + if not has_grad_scaler_kwarg: + del optimizer.grad_scale # type: ignore[attr-defined] + del optimizer.found_inf # type: ignore[attr-defined] + return retval + + if optimizer_state["stage"] is OptState.READY: + self.unscale_(optimizer) + + assert ( + len(optimizer_state["found_inf_per_device"]) > 0 + ), "No inf checks were recorded for this optimizer." + + retval = self._maybe_opt_step(optimizer, optimizer_state, *args, **kwargs) + + optimizer_state["stage"] = OptState.STEPPED + + return retval + + def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None: + """Update the scale factor. 
+ + If any optimizer steps were skipped the scale is multiplied by ``backoff_factor`` + to reduce it. If ``growth_interval`` unskipped iterations occurred consecutively, + the scale is multiplied by ``growth_factor`` to increase it. + + Passing ``new_scale`` sets the new scale value manually. (``new_scale`` is not + used directly, it's used to fill GradScaler's internal scale tensor. So if + ``new_scale`` was a tensor, later in-place changes to that tensor will not further + affect the scale GradScaler uses internally.) + + Args: + new_scale (float or :class:`torch.Tensor`, optional, default=None): New scale factor. + + .. warning:: + :meth:`update` should only be called at the end of the iteration, after ``scaler.step(optimizer)`` has + been invoked for all optimizers used this iteration. + + .. warning:: + For performance reasons, we do not check the scale factor value to avoid synchronizations, + so the scale factor is not guaranteed to be above 1. If the scale falls below 1 and/or + you are seeing NaNs in your gradients or loss, something is likely wrong. For example, + bf16-pretrained models are often incompatible with AMP/fp16 due to differing dynamic ranges. + """ + if not self._enabled: + return + + _scale, _growth_tracker = self._check_scale_growth_tracker("update") + + if new_scale is not None: + assert self._scale is not None + # Accept a new user-defined scale. + if isinstance(new_scale, float): + self._scale.fill_(new_scale) + else: + reason = "new_scale should be a float or a 1-element torch.cuda.FloatTensor or \ + torch.FloatTensor with requires_grad=False." + assert new_scale.device.type == self._device, reason + assert new_scale.numel() == 1, reason + assert new_scale.requires_grad is False, reason + self._scale.copy_(new_scale) + else: + # Consume shared inf/nan data collected from optimizers to update the scale. + # If all found_inf tensors are on the same device as self._scale, this operation is asynchronous. + found_infs = [ + found_inf.to(device=_scale.device, non_blocking=True) + for state in self._per_optimizer_states.values() + for found_inf in state["found_inf_per_device"].values() + ] + + assert len(found_infs) > 0, "No inf checks were recorded prior to update." + + found_inf_combined = found_infs[0] + if len(found_infs) > 1: + for i in range(1, len(found_infs)): + found_inf_combined += found_infs[i] + + torch._amp_update_scale_( + _scale, + _growth_tracker, + found_inf_combined, + self._growth_factor, + self._backoff_factor, + self._growth_interval, + ) + + # To prepare for next iteration, clear the data collected from optimizers this iteration. + self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state) + + def _get_scale_async(self) -> Optional[torch.Tensor]: + return self._scale + + def get_scale(self) -> float: + """Return a Python float containing the current scale, or 1.0 if scaling is disabled. + + .. warning:: + :meth:`get_scale` incurs a CPU-GPU sync. + """ + if self._enabled: + return ( + self._init_scale + if (scale := self._get_scale_async()) is None + else cast(float, scale.item()) + ) + return 1.0 + + def get_growth_factor(self) -> float: + r"""Return a Python float containing the scale growth factor.""" + return self._growth_factor + + def set_growth_factor(self, new_factor: float) -> None: + r"""Set a new scale growth factor. + + Args: + new_scale (float): Value to use as the new scale growth factor. 
+ """ + self._growth_factor = new_factor + + def get_backoff_factor(self) -> float: + r"""Return a Python float containing the scale backoff factor.""" + return self._backoff_factor + + def set_backoff_factor(self, new_factor: float) -> None: + r"""Set a new scale backoff factor. + + Args: + new_scale (float): Value to use as the new scale backoff factor. + """ + self._backoff_factor = new_factor + + def get_growth_interval(self) -> int: + r"""Return a Python int containing the growth interval.""" + return self._growth_interval + + def set_growth_interval(self, new_interval: int) -> None: + r"""Set a new growth interval. + + Args: + new_interval (int): Value to use as the new growth interval. + """ + self._growth_interval = new_interval + + def _get_growth_tracker(self) -> int: + if self._enabled: + return ( + self._init_growth_tracker + if self._growth_tracker is None + else cast(int, self._growth_tracker.item()) + ) + return 0 + + def is_enabled(self) -> bool: + r"""Return a bool indicating whether this instance is enabled.""" + return self._enabled + + def state_dict(self) -> Dict[str, Any]: + r"""Return the state of the scaler as a :class:`dict`. + + It contains five entries: + + * ``"scale"`` - a Python float containing the current scale + * ``"growth_factor"`` - a Python float containing the current growth factor + * ``"backoff_factor"`` - a Python float containing the current backoff factor + * ``"growth_interval"`` - a Python int containing the current growth interval + * ``"_growth_tracker"`` - a Python int containing the number of recent consecutive unskipped steps. + + If this instance is not enabled, returns an empty dict. + + .. note:: + If you wish to checkpoint the scaler's state after a particular iteration, :meth:`state_dict` + should be called after :meth:`update`. + """ + if self._enabled: + return { + "scale": self.get_scale(), + "growth_factor": self._growth_factor, + "backoff_factor": self._backoff_factor, + "growth_interval": self._growth_interval, + "_growth_tracker": self._get_growth_tracker(), + } + return {} + + def load_state_dict(self, state_dict: Dict[str, Any]) -> None: + r"""Load the scaler state. + + If this instance is disabled, :meth:`load_state_dict` is a no-op. + + Args: + state_dict(dict): scaler state. Should be an object returned from a call to :meth:`state_dict`. + """ + if not self._enabled: + return + + if len(state_dict) == 0: + raise RuntimeError( + "The source state dict is empty, possibly because it was saved " + "from a disabled instance of GradScaler." + ) + + self._init_scale = cast(float, state_dict["scale"]) + if self._scale is not None: + self._scale.fill_(state_dict["scale"]) + self._growth_factor = cast(float, state_dict["growth_factor"]) + self._backoff_factor = cast(float, state_dict["backoff_factor"]) + self._growth_interval = cast(int, state_dict["growth_interval"]) + self._init_growth_tracker = cast(int, state_dict["_growth_tracker"]) + if self._growth_tracker is not None: + self._growth_tracker.fill_(state_dict["_growth_tracker"]) + + def __getstate__(self) -> Dict[str, Any]: + state = self.__dict__.copy() + if self._enabled: + assert len(self._per_optimizer_states) == 0, ( + "A GradScaler instance may only be pickled at the beginning " + "of an iteration, or at the end after scaler.update()." + ) + # Pickling _scale and _growth_tracker Tensors directly triggers + # "warnings.warn("pickle support for Storage will be removed in 1.5..." + # so instead, we set the unpickled instance up to reinitialize them lazily. 
+ state["_init_scale"] = self.get_scale() + state["_init_growth_tracker"] = self._get_growth_tracker() + state["_scale"] = None + state["_growth_tracker"] = None + return state + + def __setstate__(self, state: Dict[str, Any]) -> None: + self.__dict__.update(state) + + def _check_inf_per_device(self, optimizer: torch.optim.Optimizer) -> Dict[str, Any]: + _scale, _ = self._check_scale_growth_tracker("_check_inf_per_device") + + dummy_inv_scale = torch.full((), 1.0, dtype=torch.float32, device=_scale.device) + found_inf = torch.full((), 0.0, dtype=torch.float32, device=_scale.device) + + self._per_optimizer_states[id(optimizer)][ + "found_inf_per_device" + ] = self._unscale_grads_(optimizer, dummy_inv_scale, found_inf, True) + + return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] + + def _found_inf_per_device(self, optimizer: torch.optim.Optimizer) -> Dict[str, Any]: + return self._per_optimizer_states[id(optimizer)]["found_inf_per_device"] diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nested/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nested/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..464f973466a4404e481799dec678cc9f95419c53 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nested/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9913cd1d79a585c9f782fb780cb212b359136747 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbc44bff1ac8742e63c0ac34aba416c11415dcc6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/nested_tensor.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..201e79b7459331ac1a9de61138f3c74f18930d23 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/ops.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b102d705f59fd05705fe78f321c001b38fe1e817 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/torch/nested/_internal/__pycache__/sdpa.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/INSTALLER b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- 
/dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..c84a47288f8442851788d20022245894452bdeab
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 EleutherAI
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..43936014fd5ea9256869918b3576c3583974270f
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/METADATA
@@ -0,0 +1,118 @@
+Metadata-Version: 2.1
+Name: tqdm-multiprocess
+Version: 0.0.11
+Summary: Easy multiprocessing with tqdm and logging redirected to main process.
+Home-page: https://github.com/EleutherAI/tqdm-multiprocess
+Author: researcher2
+Author-email: 2researcher2@gmail.com
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Requires-Python: >=3.6
+Description-Content-Type: text/markdown
+Requires-Dist: tqdm
+Requires-Dist: colorama
+Provides-Extra: dev
+Requires-Dist: twine ; extra == 'dev'
+
+# tqdm-multiprocess
+Using queues, tqdm-multiprocess supports multiple worker processes, each with multiple tqdm progress bars, displaying them cleanly through the main process. The worker processes also have access to a single global tqdm for aggregate progress monitoring.
+
+Logging is also redirected from the subprocesses to the root logger in the main process.
+
+tqdm(iterator) is currently not supported; you will need to initialize your worker tqdms with a total and update them manually.
+
+Due to the performance limits of the default Python multiprocessing queue, you need to update your global and worker process tqdms infrequently to avoid flooding the main process. I will attempt to implement a lock-free ring buffer at some point to see if things can be improved.
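+
+A minimal sketch of the manual-update pattern described above (illustrative only: the task name, its `some_input` argument, and `total_steps` are made up here, while `tqdm_func` and `global_tqdm` are the two extra arguments the pool passes to every task):
+
+```python
+def my_task(some_input, tqdm_func, global_tqdm):
+    # Create a worker bar with an explicit total, since tqdm(iterator) is not supported.
+    total_steps = 50
+    with tqdm_func(total=total_steps, dynamic_ncols=True) as progress:
+        for _ in range(total_steps):
+            # ... do one unit of work here ...
+            progress.update()        # advance this worker's bar manually
+    if global_tqdm is not None:      # the global bar may be passed as None
+        global_tqdm.update()         # advance the shared, pool-wide bar
+    return True                      # a falsy return value triggers the error callback
+```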
+
+## Installation
+
+```bash
+pip install tqdm-multiprocess
+```
+
+## Usage
+
+*TqdmMultiProcessPool* creates a standard Python multiprocessing pool with the desired number of processes. Under the hood it uses apply_async with an event loop to monitor a tqdm and logging queue, allowing the worker processes to redirect both their tqdm objects and logging messages to your main process. There is also a queue for the workers to update the single global tqdm.
+
+As shown below, you create a list of tasks containing their function and a tuple with your parameters. The functions you pass in will need the extra arguments "tqdm_func, global_tqdm" at the end. You must use tqdm_func when initializing your tqdms for the redirection to work. As mentioned above, passing iterators into the tqdm function is currently not supported, so set total=total_steps when setting up your tqdm, and then update the progress manually with the update() method. All other arguments to tqdm should work fine.
+
+Once you have your task list, call the map() method on your pool, passing in the process count, global_tqdm (or None), the task list, as well as error and done callback functions. The error callback will be triggered if your task functions return anything evaluating as False (if not task_result in the source code). The done callback will be called when the task successfully completes.
+
+The map method returns a list containing the returned results for all your tasks in their original order.
+
+### examples/basic_example.py
+
+```python
+from time import sleep
+import multiprocessing
+import tqdm
+
+import logging
+from tqdm_multiprocess.logger import setup_logger_tqdm
+logger = logging.getLogger(__name__)
+
+from tqdm_multiprocess import TqdmMultiProcessPool
+
+iterations1 = 100
+iterations2 = 5
+iterations3 = 2
+def some_other_function(tqdm_func, global_tqdm):
+
+    total_iterations = iterations1 * iterations2 * iterations3
+    with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress3:
+        progress3.set_description("outer")
+        for i in range(iterations3):
+            logger.info("outer")
+            total_iterations = iterations1 * iterations2
+            with tqdm_func(total=total_iterations, dynamic_ncols=True) as progress2:
+                progress2.set_description("middle")
+                for j in range(iterations2):
+                    logger.info("middle")
+                    #for k in tqdm_func(range(iterations1), dynamic_ncols=True, desc="inner"):
+                    with tqdm_func(total=iterations1, dynamic_ncols=True) as progress1:
+                        for j in range(iterations1):
+                            # logger.info("inner") # Spam slows down tqdm too much
+                            progress1.set_description("inner")
+                            sleep(0.01)
+                            progress1.update()
+                            progress2.update()
+                            progress3.update()
+                            global_tqdm.update()
+
+    logger.warning(f"Warning test message. {multiprocessing.current_process().name}")
+    logger.error(f"Error test message. {multiprocessing.current_process().name}")
+
+
+# Multiprocessed
+def example_multiprocessing_function(some_input, tqdm_func, global_tqdm):
+    logger.debug(f"Debug test message - I won't show up in console. {multiprocessing.current_process().name}")
+    logger.info(f"Info test message. {multiprocessing.current_process().name}")
+    some_other_function(tqdm_func, global_tqdm)
+    return True
+
+def error_callback(result):
+    print("Error!")
+
+def done_callback(result):
+    print("Done.
Result: ", result) + +def example(): + pool = TqdmMultiProcessPool() + process_count = 4 + task_count = 10 + initial_tasks = [(example_multiprocessing_function, (i,)) for i in range(task_count)] + total_iterations = iterations1 * iterations2 * iterations3 * task_count + with tqdm.tqdm(total=total_iterations, dynamic_ncols=True) as global_progress: + global_progress.set_description("global") + results = pool.map(process_count, global_progress, initial_tasks, error_callback, done_callback) + print(results) + +if __name__ == '__main__': + logfile_path = "tqdm_multiprocessing_example.log" + setup_logger_tqdm(logfile_path) # Logger will write messages using tqdm.write + example() +``` + + diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..cfa85245b1b2db3d2f2f995a783c0fdb6f84864c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/RECORD @@ -0,0 +1,18 @@ +examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +examples/__pycache__/__init__.cpython-310.pyc,, +examples/__pycache__/basic_example.cpython-310.pyc,, +examples/basic_example.py,sha256=BXEyjPzHVxUVtD2g5A-xeZOtzRY10jiyuITTRqI67n8,2463 +tqdm_multiprocess-0.0.11.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +tqdm_multiprocess-0.0.11.dist-info/LICENSE,sha256=TLAo1IEUDiwrQ9q89Bt-tJmmCon8eSsGrAZAzpwLguo,1088 +tqdm_multiprocess-0.0.11.dist-info/METADATA,sha256=BlWIKFVIP1eqTciC9JJO15VSoGMf70M9nWAWu8Oiq64,5679 +tqdm_multiprocess-0.0.11.dist-info/RECORD,, +tqdm_multiprocess-0.0.11.dist-info/WHEEL,sha256=D1Wh14kWDxPnrM-5t_6UCB-UuQNrEODtRa3vF4OsvQY,97 +tqdm_multiprocess-0.0.11.dist-info/top_level.txt,sha256=84yOpK8vPnmQ1iMs3w7vmZuUFMI2t62KewLi_i7CniE,18 +tqdm_multiprocess/__init__.py,sha256=Iv5LxLnGnJfTkeg12f6EveHdejESd6Y15AW1iLi3Z5U,39 +tqdm_multiprocess/__pycache__/__init__.cpython-310.pyc,, +tqdm_multiprocess/__pycache__/logger.cpython-310.pyc,, +tqdm_multiprocess/__pycache__/std.cpython-310.pyc,, +tqdm_multiprocess/__pycache__/tqdm_multiprocess.cpython-310.pyc,, +tqdm_multiprocess/logger.py,sha256=d13GtAC4b1W-i0sKEYfh6sxEEQ-1vk8KItoWCqUKs6I,2919 +tqdm_multiprocess/std.py,sha256=csXggdcU488MxpPl-O1oByWQxQYRrImokk4nMyWKIO8,8006 +tqdm_multiprocess/tqdm_multiprocess.py,sha256=jZFLI3qyWnl5q4aABWWJjG4-_8OshpmPQfFPHOPqbOA,5712 diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..a9d82522f6d5852d975d031fa3d5791ced4d738a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.35.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/top_level.txt b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..da91afe6090b269413fa65ecce26becf0dc8a003 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/tqdm_multiprocess-0.0.11.dist-info/top_level.txt @@ -0,0 +1 @@ +tqdm_multiprocess diff --git a/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/INSTALLER 
b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/LICENSE b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..f26bcf4d2de6eb136e31006ca3ab447d5e488adf --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/LICENSE @@ -0,0 +1,279 @@ +A. HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see https://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations, which became +Zope Corporation. In 2001, the Python Software Foundation (PSF, see +https://www.python.org/psf/) was formed, a non-profit organization +created specifically to own Python-related Intellectual Property. +Zope Corporation was a sponsoring member of the PSF. + +All Python releases are Open Source (see https://opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? (1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2 and above 2.1.1 2001-now PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause. According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +Python software and documentation are licensed under the +Python Software Foundation License Version 2. + +Starting with Python 3.8.6, examples, recipes, and other code in +the documentation are dual licensed under the PSF License Version 2 +and the Zero-Clause BSD license. 
+ +Some software incorporated into Python is under different licenses. +The licenses are listed with code falling under that license. + + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. 
Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. 
This +Agreement may also be obtained from a proxy server on the internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. 
+ +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION +---------------------------------------------------------------------- + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/METADATA b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..5089b4ddde4835e1dd7dd8ef6ac52f27398c0ae5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/METADATA @@ -0,0 +1,66 @@ +Metadata-Version: 2.1 +Name: typing_extensions +Version: 4.11.0 +Summary: Backported and Experimental Type Hints for Python 3.8+ +Keywords: annotations,backport,checker,checking,function,hinting,hints,type,typechecking,typehinting,typehints,typing +Author-email: "Guido van Rossum, Jukka Lehtosalo, Łukasz Langa, Michael Lee" +Requires-Python: >=3.8 +Description-Content-Type: text/markdown +Classifier: Development Status :: 5 - Production/Stable +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Python Software Foundation License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Software Development +Project-URL: Bug Tracker, https://github.com/python/typing_extensions/issues +Project-URL: Changes, https://github.com/python/typing_extensions/blob/main/CHANGELOG.md +Project-URL: Documentation, https://typing-extensions.readthedocs.io/ +Project-URL: Home, https://github.com/python/typing_extensions +Project-URL: Q & A, https://github.com/python/typing/discussions +Project-URL: Repository, https://github.com/python/typing_extensions + +# Typing Extensions + +[![Chat at https://gitter.im/python/typing](https://badges.gitter.im/python/typing.svg)](https://gitter.im/python/typing) + +[Documentation](https://typing-extensions.readthedocs.io/en/latest/#) – +[PyPI](https://pypi.org/project/typing-extensions/) + +## Overview + +The `typing_extensions` module serves two related purposes: + +- Enable use of new type system 
features on older Python versions. For example, + `typing.TypeGuard` is new in Python 3.10, but `typing_extensions` allows + users on previous Python versions to use it too. +- Enable experimentation with new type system PEPs before they are accepted and + added to the `typing` module. + +`typing_extensions` is treated specially by static type checkers such as +mypy and pyright. Objects defined in `typing_extensions` are treated the same +way as equivalent forms in `typing`. + +`typing_extensions` uses +[Semantic Versioning](https://semver.org/). The +major version will be incremented only for backwards-incompatible changes. +Therefore, it's safe to depend +on `typing_extensions` like this: `typing_extensions >=x.y, <(x+1)`, +where `x.y` is the first version that includes all features you need. + +## Included items + +See [the documentation](https://typing-extensions.readthedocs.io/en/latest/#) for a +complete listing of module contents. + +## Contributing + +See [CONTRIBUTING.md](https://github.com/python/typing_extensions/blob/main/CONTRIBUTING.md) +for how to contribute to `typing_extensions`. + diff --git a/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/RECORD b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..b06fbb5eeff1415d272c12b9800824a544d3618d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/RECORD @@ -0,0 +1,7 @@ +__pycache__/typing_extensions.cpython-310.pyc,, +typing_extensions-4.11.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +typing_extensions-4.11.0.dist-info/LICENSE,sha256=Oy-B_iHRgcSZxZolbI4ZaEVdZonSaaqFNzv7avQdo78,13936 +typing_extensions-4.11.0.dist-info/METADATA,sha256=rhGXH-iuwULXNZDAtjXcjwU2jcxotMTkFiUg0OqYXzI,2967 +typing_extensions-4.11.0.dist-info/RECORD,, +typing_extensions-4.11.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81 +typing_extensions.py,sha256=mvJ5zBIuLMsaVGHZjRLsGh8ouwg5Kr3uNbWCHX8-Ao0,122293 diff --git a/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/WHEEL b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..3b5e64b5e6c4a210201d1676a891fd57b15cda99 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/typing_extensions-4.11.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.9.0 +Root-Is-Purelib: true +Tag: py3-none-any