diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/__init__.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f1b3d6243d800c28469efad4bb452e40c76e6703 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/__init__.py @@ -0,0 +1,81 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +from .__version__ import __author__, __copyright__, __email__, __license__, __version__ +from ._base import AbstractSanitizer, AbstractValidator +from ._common import ( + ascii_symbols, + normalize_platform, + replace_ansi_escape, + replace_unprintable_char, + unprintable_ascii_chars, + validate_pathtype, + validate_unprintable_char, +) +from ._const import Platform +from ._filename import ( + FileNameSanitizer, + FileNameValidator, + is_valid_filename, + sanitize_filename, + validate_filename, +) +from ._filepath import ( + FilePathSanitizer, + FilePathValidator, + is_valid_filepath, + sanitize_filepath, + validate_filepath, +) +from ._ltsv import sanitize_ltsv_label, validate_ltsv_label +from ._symbol import replace_symbol, validate_symbol +from .error import ( + ErrorReason, + InvalidCharError, + InvalidReservedNameError, + NullNameError, + ReservedNameError, + ValidationError, + ValidReservedNameError, +) + + +__all__ = ( + "__author__", + "__copyright__", + "__email__", + "__license__", + "__version__", + "AbstractSanitizer", + "AbstractValidator", + "Platform", + "ascii_symbols", + "normalize_platform", + "replace_ansi_escape", + "replace_unprintable_char", + "unprintable_ascii_chars", + "validate_pathtype", + "validate_unprintable_char", + "FileNameSanitizer", + "FileNameValidator", + "is_valid_filename", + "sanitize_filename", + "validate_filename", + "FilePathSanitizer", + "FilePathValidator", + "is_valid_filepath", + "sanitize_filepath", + "validate_filepath", + "sanitize_ltsv_label", + "validate_ltsv_label", + "replace_symbol", + "validate_symbol", + "ErrorReason", + "InvalidCharError", + "InvalidReservedNameError", + "NullNameError", + "ReservedNameError", + "ValidationError", + "ValidReservedNameError", +) diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/__version__.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..7359321cede8ecec1c85b17c509a41966fe649a2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/__version__.py @@ -0,0 +1,6 @@ +__author__ = "Tsuyoshi Hombashi" +__copyright__ = f"Copyright 2016, {__author__}" +__license__ = "MIT License" +__version__ = "3.2.0" +__maintainer__ = __author__ +__email__ = "tsuyoshi.hombashi@gmail.com" diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/_base.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..b829410c962bae5c5ecc061e2bc4661cd0d85b6e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/_base.py @@ -0,0 +1,237 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import abc +import os +import sys +from typing import ClassVar, Optional, Sequence, Tuple + +from ._common import normalize_platform, unprintable_ascii_chars +from ._const import DEFAULT_MIN_LEN, Platform +from ._types import PathType, PlatformType +from .error import ReservedNameError, ValidationError +from .handler import NullValueHandler, ReservedNameHandler, ValidationErrorHandler + + +class BaseFile: + _INVALID_PATH_CHARS: ClassVar[str] = "".join(unprintable_ascii_chars) + _INVALID_FILENAME_CHARS: ClassVar[str] = _INVALID_PATH_CHARS + "/" + _INVALID_WIN_PATH_CHARS: ClassVar[str] = _INVALID_PATH_CHARS + ':*?"<>|\t\n\r\x0b\x0c' + _INVALID_WIN_FILENAME_CHARS: ClassVar[str] = ( + _INVALID_FILENAME_CHARS + _INVALID_WIN_PATH_CHARS + "\\" + ) + + @property + def platform(self) -> Platform: + return self.__platform + + @property + def reserved_keywords(self) -> Tuple[str, ...]: + return self._additional_reserved_names + + @property + def max_len(self) -> int: + return self._max_len + + def __init__( + self, + max_len: int, + fs_encoding: Optional[str], + additional_reserved_names: Optional[Sequence[str]] = None, + platform_max_len: Optional[int] = None, + platform: Optional[PlatformType] = None, + ) -> None: + if additional_reserved_names is None: + additional_reserved_names = tuple() + self._additional_reserved_names = tuple(n.upper() for n in additional_reserved_names) + + self.__platform = normalize_platform(platform) + + if platform_max_len is None: + platform_max_len = self._get_default_max_path_len() + + if max_len <= 0: + self._max_len = platform_max_len + else: + self._max_len = max_len + + self._max_len = min(self._max_len, platform_max_len) + + if fs_encoding: + self._fs_encoding = fs_encoding + else: + self._fs_encoding = sys.getfilesystemencoding() + + def _is_posix(self) -> bool: + return self.platform == Platform.POSIX + + def _is_universal(self) -> bool: + return self.platform == Platform.UNIVERSAL + + def _is_linux(self, include_universal: bool = False) -> bool: + if include_universal: + return self.platform in (Platform.UNIVERSAL, Platform.LINUX) + + return self.platform == Platform.LINUX + + def _is_windows(self, include_universal: bool = False) -> bool: + if include_universal: + return self.platform in (Platform.UNIVERSAL, Platform.WINDOWS) + + return self.platform == Platform.WINDOWS + + def _is_macos(self, include_universal: bool = False) -> bool: + if include_universal: + return self.platform in (Platform.UNIVERSAL, Platform.MACOS) + + return self.platform == Platform.MACOS + + def _get_default_max_path_len(self) -> int: + if self._is_linux(): + return 4096 + + if self._is_windows(): + return 260 + + if self._is_posix() or self._is_macos(): + return 1024 + + return 260 # universal + + +class AbstractValidator(BaseFile, metaclass=abc.ABCMeta): + def __init__( + self, + max_len: int, + fs_encoding: Optional[str], + check_reserved: bool, + additional_reserved_names: Optional[Sequence[str]] = None, + platform_max_len: Optional[int] = None, + platform: Optional[PlatformType] = None, + ) -> None: + self._check_reserved = check_reserved + + super().__init__( + max_len, + fs_encoding, + additional_reserved_names=additional_reserved_names, + platform_max_len=platform_max_len, + platform=platform, + ) + + @abc.abstractproperty + def min_len(self) -> int: # pragma: no cover + pass + + @abc.abstractmethod + def validate(self, value: PathType) -> None: # pragma: no cover + pass + + def is_valid(self, value: PathType) -> bool: + try: + 
self.validate(value) + except (TypeError, ValidationError): + return False + + return True + + def _is_reserved_keyword(self, value: str) -> bool: + return value in self.reserved_keywords + + +class AbstractSanitizer(BaseFile, metaclass=abc.ABCMeta): + def __init__( + self, + validator: AbstractValidator, + max_len: int, + fs_encoding: Optional[str], + validate_after_sanitize: bool, + null_value_handler: Optional[ValidationErrorHandler] = None, + reserved_name_handler: Optional[ValidationErrorHandler] = None, + additional_reserved_names: Optional[Sequence[str]] = None, + platform_max_len: Optional[int] = None, + platform: Optional[PlatformType] = None, + ) -> None: + super().__init__( + max_len=max_len, + fs_encoding=fs_encoding, + additional_reserved_names=additional_reserved_names, + platform_max_len=platform_max_len, + platform=platform, + ) + + if null_value_handler is None: + null_value_handler = NullValueHandler.return_null_string + self._null_value_handler = null_value_handler + + if reserved_name_handler is None: + reserved_name_handler = ReservedNameHandler.add_trailing_underscore + self._reserved_name_handler = reserved_name_handler + + self._validate_after_sanitize = validate_after_sanitize + + self._validator = validator + + @abc.abstractmethod + def sanitize(self, value: PathType, replacement_text: str = "") -> PathType: # pragma: no cover + pass + + +class BaseValidator(AbstractValidator): + @property + def min_len(self) -> int: + return self._min_len + + def __init__( + self, + min_len: int, + max_len: int, + fs_encoding: Optional[str], + check_reserved: bool, + additional_reserved_names: Optional[Sequence[str]] = None, + platform_max_len: Optional[int] = None, + platform: Optional[PlatformType] = None, + ) -> None: + if min_len <= 0: + min_len = DEFAULT_MIN_LEN + self._min_len = max(min_len, 1) + + super().__init__( + max_len=max_len, + fs_encoding=fs_encoding, + check_reserved=check_reserved, + additional_reserved_names=additional_reserved_names, + platform_max_len=platform_max_len, + platform=platform, + ) + + self._validate_max_len() + + def _validate_reserved_keywords(self, name: str) -> None: + if not self._check_reserved: + return + + root_name = self.__extract_root_name(name) + base_name = os.path.basename(name).upper() + + if self._is_reserved_keyword(root_name.upper()) or self._is_reserved_keyword( + base_name.upper() + ): + raise ReservedNameError( + f"'{root_name}' is a reserved name", + reusable_name=False, + reserved_name=root_name, + platform=self.platform, + ) + + def _validate_max_len(self) -> None: + if self.max_len < 1: + raise ValueError("max_len must be greater or equal to one") + + if self.min_len > self.max_len: + raise ValueError("min_len must be lower than max_len") + + @staticmethod + def __extract_root_name(path: str) -> str: + return os.path.splitext(os.path.basename(path))[0] diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/_const.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/_const.py new file mode 100644 index 0000000000000000000000000000000000000000..6a53dfdcd45425fb9db7e68bbbd15847a5c0b161 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/_const.py @@ -0,0 +1,40 @@ +import enum + + +DEFAULT_MIN_LEN = 1 +INVALID_CHAR_ERR_MSG_TMPL = "invalids=({invalid}), value={value}" + + +_NTFS_RESERVED_FILE_NAMES = ( + "$Mft", + "$MftMirr", + "$LogFile", + "$Volume", + "$AttrDef", + "$Bitmap", + "$Boot", + "$BadClus", + "$Secure", + "$Upcase", + "$Extend", + "$Quota", + "$ObjId", + "$Reparse", +) # Only 
in root directory
+
+
+@enum.unique
+class Platform(enum.Enum):
+    """
+    Platform specifier enumeration.
+    """
+
+    #: POSIX compatible platform.
+    POSIX = "POSIX"
+
+    #: Platform independent. Note that absolute paths cannot be specified with this.
+    UNIVERSAL = "universal"
+
+    LINUX = "Linux"
+    WINDOWS = "Windows"
+    MACOS = "macOS"
diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/_symbol.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/_symbol.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d37cd5aa57c764f67e98e0a1c9611de982bd40a
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/_symbol.py
@@ -0,0 +1,92 @@
+"""
+.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
+"""
+
+import re
+from typing import Sequence
+
+from ._common import ascii_symbols, to_str, unprintable_ascii_chars
+from .error import InvalidCharError
+
+
+__RE_SYMBOL = re.compile(
+    "[{}]".format(re.escape("".join(ascii_symbols + unprintable_ascii_chars))), re.UNICODE
+)
+
+
+def validate_symbol(text: str) -> None:
+    """
+    Verify whether any symbols are included in the ``text``.
+
+    Args:
+        text:
+            Input text to validate.
+
+    Raises:
+        ValidationError (ErrorReason.INVALID_CHARACTER):
+            If any symbols are included in the ``text``.
+    """
+
+    match_list = __RE_SYMBOL.findall(to_str(text))
+    if match_list:
+        raise InvalidCharError(f"invalid symbols found: {match_list}")
+
+
+def replace_symbol(
+    text: str,
+    replacement_text: str = "",
+    exclude_symbols: Sequence[str] = [],
+    is_replace_consecutive_chars: bool = False,
+    is_strip: bool = False,
+) -> str:
+    """
+    Replace all symbols in the ``text``.
+
+    Args:
+        text:
+            Input text.
+        replacement_text:
+            Replacement text.
+        exclude_symbols:
+            Symbols to exclude from the replacement.
+        is_replace_consecutive_chars:
+            If |True|, collapse consecutive ``replacement_text`` characters
+            into a single character.
+        is_strip:
+            If |True|, strip ``replacement_text`` from the beginning and end of the result.
+
+    Returns:
+        The replaced string.
+ + Example: + + :ref:`example-sanitize-symbol` + """ + + if exclude_symbols: + regexp = re.compile( + "[{}]".format( + re.escape( + "".join(set(ascii_symbols + unprintable_ascii_chars) - set(exclude_symbols)) + ) + ), + re.UNICODE, + ) + else: + regexp = __RE_SYMBOL + + try: + new_text = regexp.sub(replacement_text, to_str(text)) + except TypeError: + raise TypeError("text must be a string") + + if not replacement_text: + return new_text + + if is_replace_consecutive_chars: + new_text = re.sub(f"{re.escape(replacement_text)}+", replacement_text, new_text) + + if is_strip: + new_text = new_text.strip(replacement_text) + + return new_text diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/_types.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/_types.py new file mode 100644 index 0000000000000000000000000000000000000000..c946842489cb1998c4e9a2dbcb95ec9286e0a528 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/_types.py @@ -0,0 +1,8 @@ +from pathlib import Path +from typing import TypeVar + +from ._const import Platform + + +PathType = TypeVar("PathType", str, Path) +PlatformType = TypeVar("PlatformType", str, Platform) diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/argparse.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/argparse.py new file mode 100644 index 0000000000000000000000000000000000000000..baeafe2751f007276ed14fa273f4fe206673d744 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/argparse.py @@ -0,0 +1,47 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +from argparse import ArgumentTypeError + +from ._filename import sanitize_filename, validate_filename +from ._filepath import sanitize_filepath, validate_filepath +from .error import ValidationError + + +def validate_filename_arg(value: str) -> str: + if not value: + return "" + + try: + validate_filename(value) + except ValidationError as e: + raise ArgumentTypeError(e) + + return value + + +def validate_filepath_arg(value: str) -> str: + if not value: + return "" + + try: + validate_filepath(value, platform="auto") + except ValidationError as e: + raise ArgumentTypeError(e) + + return value + + +def sanitize_filename_arg(value: str) -> str: + if not value: + return "" + + return sanitize_filename(value) + + +def sanitize_filepath_arg(value: str) -> str: + if not value: + return "" + + return sanitize_filepath(value, platform="auto") diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/click.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/click.py new file mode 100644 index 0000000000000000000000000000000000000000..76be6ea45aa974cb98ed5990628cd0cac766c03f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/click.py @@ -0,0 +1,48 @@ +""" +.. 
codeauthor:: Tsuyoshi Hombashi +""" + +import click +from click.core import Context, Option + +from ._filename import sanitize_filename, validate_filename +from ._filepath import sanitize_filepath, validate_filepath +from .error import ValidationError + + +def validate_filename_arg(ctx: Context, param: Option, value: str) -> str: + if not value: + return "" + + try: + validate_filename(value) + except ValidationError as e: + raise click.BadParameter(str(e)) + + return value + + +def validate_filepath_arg(ctx: Context, param: Option, value: str) -> str: + if not value: + return "" + + try: + validate_filepath(value) + except ValidationError as e: + raise click.BadParameter(str(e)) + + return value + + +def sanitize_filename_arg(ctx: Context, param: Option, value: str) -> str: + if not value: + return "" + + return sanitize_filename(value) + + +def sanitize_filepath_arg(ctx: Context, param: Option, value: str) -> str: + if not value: + return "" + + return sanitize_filepath(value) diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/error.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/error.py new file mode 100644 index 0000000000000000000000000000000000000000..ee75600b85d5b5938cb0942c206b0b63134f3097 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/error.py @@ -0,0 +1,253 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + +import enum +from typing import Dict, Optional + +from ._const import Platform + + +def _to_error_code(code: int) -> str: + return f"PV{code:04d}" + + +class ErrorAttrKey: + BYTE_COUNT = "byte_count" + DESCRIPTION = "description" + FS_ENCODING = "fs_encoding" + PLATFORM = "platform" + REASON = "reason" + RESERVED_NAME = "reserved_name" + REUSABLE_NAME = "reusable_name" + + +@enum.unique +class ErrorReason(enum.Enum): + """ + Validation error reasons. + """ + + NULL_NAME = (_to_error_code(1001), "NULL_NAME", "the value must not be an empty") + RESERVED_NAME = ( + _to_error_code(1002), + "RESERVED_NAME", + "found a reserved name by a platform", + ) + INVALID_CHARACTER = ( + _to_error_code(1100), + "INVALID_CHARACTER", + "invalid characters found", + ) + INVALID_LENGTH = ( + _to_error_code(1101), + "INVALID_LENGTH", + "found an invalid string length", + ) + FOUND_ABS_PATH = ( + _to_error_code(1200), + "FOUND_ABS_PATH", + "found an absolute path where must be a relative path", + ) + MALFORMED_ABS_PATH = ( + _to_error_code(1201), + "MALFORMED_ABS_PATH", + "found a malformed absolute path", + ) + INVALID_AFTER_SANITIZE = ( + _to_error_code(2000), + "INVALID_AFTER_SANITIZE", + "found invalid value after sanitizing", + ) + + @property + def code(self) -> str: + """str: Error code.""" + return self.__code + + @property + def name(self) -> str: + """str: Error reason name.""" + return self.__name + + @property + def description(self) -> str: + """str: Error reason description.""" + return self.__description + + def __init__(self, code: str, name: str, description: str) -> None: + self.__name = name + self.__code = code + self.__description = description + + def __str__(self) -> str: + return f"[{self.__code}] {self.__description}" + + +class ValidationError(ValueError): + """ + Exception class of validation errors. + """ + + @property + def platform(self) -> Optional[Platform]: + """ + :py:class:`~pathvalidate.Platform`: Platform information. + """ + return self.__platform + + @property + def reason(self) -> ErrorReason: + """ + :py:class:`~pathvalidate.error.ErrorReason`: The cause of the error. 
+ """ + return self.__reason + + @property + def description(self) -> Optional[str]: + """Optional[str]: Error description.""" + return self.__description + + @property + def reserved_name(self) -> str: + """str: Reserved name.""" + return self.__reserved_name + + @property + def reusable_name(self) -> Optional[bool]: + """Optional[bool]: Whether the name is reusable or not.""" + return self.__reusable_name + + @property + def fs_encoding(self) -> Optional[str]: + """Optional[str]: File system encoding.""" + return self.__fs_encoding + + @property + def byte_count(self) -> Optional[int]: + """Optional[int]: Byte count of the path.""" + return self.__byte_count + + def __init__(self, *args, **kwargs) -> None: # type: ignore + if ErrorAttrKey.REASON not in kwargs: + raise ValueError(f"{ErrorAttrKey.REASON} must be specified") + + self.__reason: ErrorReason = kwargs.pop(ErrorAttrKey.REASON) + self.__byte_count: Optional[int] = kwargs.pop(ErrorAttrKey.BYTE_COUNT, None) + self.__platform: Optional[Platform] = kwargs.pop(ErrorAttrKey.PLATFORM, None) + self.__description: Optional[str] = kwargs.pop(ErrorAttrKey.DESCRIPTION, None) + self.__reserved_name: str = kwargs.pop(ErrorAttrKey.RESERVED_NAME, "") + self.__reusable_name: Optional[bool] = kwargs.pop(ErrorAttrKey.REUSABLE_NAME, None) + self.__fs_encoding: Optional[str] = kwargs.pop(ErrorAttrKey.FS_ENCODING, None) + + try: + super().__init__(*args[0], **kwargs) + except IndexError: + super().__init__(*args, **kwargs) + + def as_slog(self) -> Dict[str, str]: + """Return a dictionary representation of the error. + + Returns: + Dict[str, str]: A dictionary representation of the error. + """ + + slog: Dict[str, str] = { + "code": self.reason.code, + ErrorAttrKey.DESCRIPTION: self.reason.description, + } + if self.platform: + slog[ErrorAttrKey.PLATFORM] = self.platform.value + if self.description: + slog[ErrorAttrKey.DESCRIPTION] = self.description + if self.__reusable_name is not None: + slog[ErrorAttrKey.REUSABLE_NAME] = str(self.__reusable_name) + if self.__fs_encoding: + slog[ErrorAttrKey.FS_ENCODING] = self.__fs_encoding + if self.__byte_count: + slog[ErrorAttrKey.BYTE_COUNT] = str(self.__byte_count) + + return slog + + def __str__(self) -> str: + item_list = [] + header = str(self.reason) + + if Exception.__str__(self): + item_list.append(Exception.__str__(self)) + + if self.platform: + item_list.append(f"{ErrorAttrKey.PLATFORM}={self.platform.value}") + if self.description: + item_list.append(f"{ErrorAttrKey.DESCRIPTION}={self.description}") + if self.__reusable_name is not None: + item_list.append(f"{ErrorAttrKey.REUSABLE_NAME}={self.reusable_name}") + if self.__fs_encoding: + item_list.append(f"{ErrorAttrKey.FS_ENCODING}={self.__fs_encoding}") + if self.__byte_count is not None: + item_list.append(f"{ErrorAttrKey.BYTE_COUNT}={self.__byte_count:,d}") + + if item_list: + header += ": " + + return header + ", ".join(item_list).strip() + + def __repr__(self) -> str: + return self.__str__() + + +class NullNameError(ValidationError): + """[Deprecated] + Exception raised when a name is empty. + """ + + def __init__(self, *args, **kwargs) -> None: # type: ignore + kwargs[ErrorAttrKey.REASON] = ErrorReason.NULL_NAME + + super().__init__(args, **kwargs) + + +class InvalidCharError(ValidationError): + """ + Exception raised when includes invalid character(s) within a string. 
+ """ + + def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] + kwargs[ErrorAttrKey.REASON] = ErrorReason.INVALID_CHARACTER + + super().__init__(args, **kwargs) + + +class ReservedNameError(ValidationError): + """ + Exception raised when a string matched a reserved name. + """ + + def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] + kwargs[ErrorAttrKey.REASON] = ErrorReason.RESERVED_NAME + + super().__init__(args, **kwargs) + + +class ValidReservedNameError(ReservedNameError): + """[Deprecated] + Exception raised when a string matched a reserved name. + However, it can be used as a name. + """ + + def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] + kwargs[ErrorAttrKey.REUSABLE_NAME] = True + + super().__init__(args, **kwargs) + + +class InvalidReservedNameError(ReservedNameError): + """[Deprecated] + Exception raised when a string matched a reserved name. + Moreover, the reserved name is invalid as a name. + """ + + def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] + kwargs[ErrorAttrKey.REUSABLE_NAME] = False + + super().__init__(args, **kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/handler.py b/env-llmeval/lib/python3.10/site-packages/pathvalidate/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..0671891d6f57e959ec0027d7d975707414ed5d15 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pathvalidate/handler.py @@ -0,0 +1,139 @@ +""" +.. codeauthor:: Tsuyoshi Hombashi +""" + + +import warnings +from datetime import datetime +from typing import Callable + +from .error import ValidationError + + +ValidationErrorHandler = Callable[[ValidationError], str] + + +def return_null_string(e: ValidationError) -> str: + """Null value handler that always returns an empty string. + + Args: + e (ValidationError): A validation error. + + Returns: + str: An empty string. + """ + + warnings.warn( + "'return_null_string' is deprecated. Use 'NullValueHandler.return_null_string' instead.", + DeprecationWarning, + ) + + return "" + + +def return_timestamp(e: ValidationError) -> str: + """Null value handler that returns a timestamp of when the function was called. + + Args: + e (ValidationError): A validation error. + + Returns: + str: A timestamp. + """ + + warnings.warn( + "'return_timestamp' is deprecated. Use 'NullValueHandler.reserved_name_handler' instead.", + DeprecationWarning, + ) + + return str(datetime.now().timestamp()) + + +def raise_error(e: ValidationError) -> str: + """Null value handler that always raises an exception. + + Args: + e (ValidationError): A validation error. + + Raises: + ValidationError: Always raised. + """ + + raise e + + +class NullValueHandler: + @classmethod + def return_null_string(cls, e: ValidationError) -> str: + """Null value handler that always returns an empty string. + + Args: + e (ValidationError): A validation error. + + Returns: + str: An empty string. + """ + + return "" + + @classmethod + def return_timestamp(cls, e: ValidationError) -> str: + """Null value handler that returns a timestamp of when the function was called. + + Args: + e (ValidationError): A validation error. + + Returns: + str: A timestamp. + """ + + return str(datetime.now().timestamp()) + + +class ReservedNameHandler: + @classmethod + def add_leading_underscore(cls, e: ValidationError) -> str: + """Reserved name handler that adds a leading underscore (``"_"``) to the name + except for ``"."`` and ``".."``. 
+ + Args: + e (ValidationError): A reserved name error. + + Returns: + str: The converted name. + """ + + if e.reserved_name in (".", "..") or e.reusable_name: + return e.reserved_name + + return f"_{e.reserved_name}" + + @classmethod + def add_trailing_underscore(cls, e: ValidationError) -> str: + """Reserved name handler that adds a trailing underscore (``"_"``) to the name + except for ``"."`` and ``".."``. + + Args: + e (ValidationError): A reserved name error. + + Returns: + str: The converted name. + """ + + if e.reserved_name in (".", "..") or e.reusable_name: + return e.reserved_name + + return f"{e.reserved_name}_" + + @classmethod + def as_is(cls, e: ValidationError) -> str: + """Reserved name handler that returns the name as is. + + Args: + e (ValidationError): A reserved name error. + + Returns: + str: The name as is. + """ + + return e.reserved_name diff --git a/env-llmeval/lib/python3.10/site-packages/pathvalidate/py.typed b/env-llmeval/lib/python3.10/site-packages/pathvalidate/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Buenos_Aires b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Buenos_Aires new file mode 100644 index 0000000000000000000000000000000000000000..d6f999b8605c9f73653a16e2ddbd5a49b96c0f56 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Buenos_Aires differ diff --git a/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/ComodRivadavia b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/ComodRivadavia new file mode 100644 index 0000000000000000000000000000000000000000..1dcc8d85434c9d016f170cb2f16811ebef327b77 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/ComodRivadavia differ diff --git a/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Mendoza b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Mendoza new file mode 100644 index 0000000000000000000000000000000000000000..691c56978a033586e3302db2ef600e4b0ffd6366 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/Argentina/Mendoza differ diff --git a/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbf687d781175f20438d85b06bccfd77d23365fd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/tzdata/zoneinfo/America/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h new file mode 100644 index 0000000000000000000000000000000000000000..ac728968750297227c1be4aa3e444557c1899b03 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/Backtrace.h @@ -0,0 +1,2 @@ +#include +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h new file mode 100644 index 0000000000000000000000000000000000000000..2bbf43975eacc4453dc33ae561b1f5ff71bd36a1 --- 
/dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/IListRef.h @@ -0,0 +1,631 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +/* + * [Note: IListRef] + * Wrapper around different API containers (e.g. boxed and unboxed). + * + * What is it? + * =========== + * It is a tagged union of both boxed and unboxed API containers. + * Working implementations: + * + * - `IListRef` + * - `IListRef` + * + * Note that `IListRef` is a view type. Meaning that it won't own the + * tensors it holds. It's intended to be used only as argument parameters. + * Specifically, where these 2 worlds overlap. + * + * What is this for? + * ================= + * Historically, PyTorch has maintained 2 different APIs: the unboxed + * (called from C++ API and Python eager mode) and boxed APIs (called + * from the TorchScript JIT, mobile interpreter, and boxed fallbacks). + * + * Calling unboxed kernels from the boxed "world" and vice-versa may + * result in non-negligible overhead. Lists are one of those types: + * + * - Boxed world: `c10::List` + * - Unboxed world: `c10::ArrayRef` + * + * In this context, `c10::IListRef` solves this problem by wrapping those + * 2 container types, so that we don't need to convert from one to + * the other. + * + * (see https://github.com/pytorch/pytorch/issues/66328) + * + * What does it do? + * ================ + * This container wraps around the different tagged containers + * (currently, only boxed and unboxed), without incurring in extra + * overhead for converting from one to another. It does so while + * exposing usual container methods, which dispatch to corresponding + * implementations. + * + * While it works with different container types, it introduces + * overhead for repeatedly calling member functions (since those will + * get dispatched, again). Therefore, you should only use it to iterate + * through the list up to one time. If you need to do more complex things, + * call `materialize()` first. + * + * Adding support for a new Tag + * ============================ + * Suppose we want to add a new tag: `Chest`. Here are the steps + * we would have to go through: + * + * 1. Add a line for it in the macro `TORCH_ILISTREF_FORALL_TAGS`. + * + * #define TORCH_ILISTREF_FORALL_TAGS(_, ...) \ + * ... + * _(Chest, ##__VA_ARGS__) + * + * 2. Add type aliases, union members, and constructors. + * + * template + * class IListRef { + * ... + * using chest_type = + * typename detail::IListRefTagImpl::list_type; + * ... + * IListRef(...) : tag_(IListRefTag::Chest) { + * ... + * } + * ... + * union Payload { + * ... + * chest_type chest; + * ... + * }; + * ... + * }; + * + * 3. Add a default implementation for it (in 'IListRef_inl.h'). It's + * preferable to make the default implementation work for `T = Tensor` + * (both `Unboxed` and `Boxed` do it). + * + * template + * class IListRefTagImplBase { + * public: + * using elem_type = ListElemT; + * using list_type = ChestContainer; + * + * static const list_type& unwrap(const IListRef& ilist) { ... } + * + * static typename list_type::const_iterator& unwrap( + * IListRefIterator& it) { ... } + * + * static const typename list_type::const_iterator& unwrap( + * const IListRefIterator& it) { ... } + * + * static IListRefConstRef iterator_get( + * const typename list_type::const_iterator& it) { ... } + * } + * + * 4. Add an specialization for each of the already supported types. + * Finally, for consistency, add them to the tracking list. 
+ * (see [Note: IListRefTagImpl Specializations]) + * + * template <> + * class IListRefTagImpl + * : public IListRefTagImplBase {}; + * + * Adding support for a new Type + * ============================= + * Suppose we want to add support for a new type: `Matrix`. + * Here are the steps we would have to go through: + * + * 1. Add an specialization for each of the existing tags. + * For consistency, add them to the tracking list. + * (see [Note: IListRefTagImpl Specializations]) + * + * template <> + * class IListRefTagImpl + * : public IListRefTagImplBase {}; + * + * template <> + * class IListRefTagImpl + * : public IListRefTagImplBase {}; + * + * Common Problems + * =============== + * 1. One of `IListRef(Iterator)` methods are failing to compile. + * + * That may be happening because the container type you added + * is not compatible with the code written for that method. If + * that's true, then you might have to transform that code into + * a static method call (see `List::operator[]` method). + * + * 2. Can't make `IListRefIterator::operator*` return a const-reference. + * + * First, keep in mind that we assume that boxed containers will + * have to deal with `IValue` (e.g. `c10::List`). In this context, + * what may be happening is that `IValue` doesn't store internally + * your type `T`. Instead, it constructs a type new `T` everytime + * you try to get `T` for it (see `IListRef`). + */ + +namespace c10 { +template +class IListRef; + +/* + * Applies arbitrary macros to each `IListRefTag`. + */ +#define TORCH_ILISTREF_FORALL_TAGS(_, ...) \ + _(Unboxed, ##__VA_ARGS__) \ + _(Boxed, ##__VA_ARGS__) \ + _(Materialized, ##__VA_ARGS__) + +/* + * Defines a "switch-case" for `TAG`. Inside, it executes `BODY`, + * while bringing to scope: + * + * - `ImplT`: the implementation class for `TAG` + * - `this_`: the result of unwrapping `this` + */ +#define TORCH_ILISTREF_UNWRAP_CASE(TAG, BODY) \ + case c10::IListRefTag::TAG: { \ + using ImplT = c10::detail::IListRefTagImpl; \ + auto& this_ = ImplT::unwrap(*this); \ + BODY \ + } break; + +/* + * Dispatches the unwrap call, depending on `TAG`, followed by + * the execution of `BODY`. It aborts if `TAG` is not a `IListRefTag`. + * + * This macro is useful because it allows us to handle different + * types (that correspond to different tags) to be implemented + * only once. We can do it even when the implementation of the + * different tags aren't syntatically the same, by dispatching + * it to a function (e.g. `ImplT::(this_)`). + */ +#define TORCH_ILISTREF_UNWRAP(TAG, BODY) \ + switch (TAG) { \ + TORCH_ILISTREF_FORALL_TAGS(TORCH_ILISTREF_UNWRAP_CASE, BODY) \ + break; \ + default: \ + TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); \ + } + +enum class IListRefTag { +#define DEFINE_TAG(tag, ...) tag, + TORCH_ILISTREF_FORALL_TAGS(DEFINE_TAG) +#undef DEFINE_TAG + None +}; + +namespace detail { +/* + * Type alias that specifies whether we return a reference or a copy of `T`. + * + * What is this for? + * ================= + * Since values in the boxed world are represented by an `IValue`, we also + * depend on whether it can be converted to a const-reference (`Tensor`) or + * has to create a new copy of `T` (`OptionalTensorRef`). + */ +template +using IListRefConstRef = typename ivalue_to_const_ref_overload_return::type; + +/* + * Interface that implements key functions for each `IListRefTag` type. + * + * What is this for? + * ================= + * Given an `IListRef(Iterator)`, some methods have to be implemented + * differently for each `TAG`. 
Therefore, the methods inside this class + * are used as dispatch targets for the different `IListRefTag` values. + * + * You should create an specialization of this class for each possible + * combination of `IListRefTag` type (except `None`) and element types + * (e.g. `Tensor`). + * + * What does it do? + * ================ + * 1. defines static methods to be used as dispatch targets by both + * `IListRef` and `IListRefIterator` (see the implementation of + * `IListRefTagImplBase`). + * + * 2. defines the `elem_type` and `list_type` aliases that will be + * used in the definition of `IListRef`. In general, we should do + * so by inheriting from `IListRefTagImplBase`. + * + * [Note: IListRefTagImpl Specialization] + * ====================================== + * For `IListRef(Iterator)`: + * - + * - + * - + * + * For `IListRef(Iterator)`: + * - + * - + * - + */ +template +class IListRefTagImpl {}; + +/* + * Base implementation of `IListRefTagImpl` methods. + * + * What is this for? + * ================= + * This should make adding specializations for new types easier. For + * example, one should be able to add a new type just by making its + * `IListRefTagImpl` specialization inherit from `IListRefTagImplBase`. + * + * You should create a partial specialization for this class only if + * you introduce a new `IListRefTag`. The idea being that there is one + * default implementation for each possible value of `IListRefTag`. + * + * What does it do? + * ================ + * 1. defines `elem_type` as an alias to `ListElemT`. + * + * 1. defines `list_type` as an alias to the default container type + * that will hold a collection of `elem_type`. The idea being that + * all types tagged as `TAG` will have `list_type` as its container, + * with different `elem_type`. + * + * 3. defines the default implementation for each of the methods that + * are supposed to be defined on `IListRefTagImpl` specializations. + * + * 4. inheriting from `IListRefTagImplBase` also means + * that the payload of the type `IListRef` will be of type `list_type` + * when it is tagged as `TAG`. + */ +template +class IListRefTagImplBase {}; + +/* + * Materialized container for `IListRef`. + * + * What is this for? + * ================= + * Container that groups `T` references together. This exchanges the + * overhead of every method call from `IListRef` for a dynamic allocation. + * + * You should use this container instead of `IListRef` if: + * + * - You are going to iterate the list more than once + * - You need to repeatedly access arbitrary elements (using `operator[]`) + * What does it do? + + * ================ + * Removes the reference (&) from the type, and wraps it into a + * `std::reference_wrapper`. If `IListRefConstRef` is not a + * reference type, then it's left unchanged. + */ +template +using _MaterializedIListRefElem = typename std::conditional< + std::is_reference::value, + typename std::reference_wrapper::type>, + T>::type; + +template +using MaterializedIListRefElem = _MaterializedIListRefElem>; + +template +using MaterializedIListRef = std::vector>; + +} // namespace detail + +/* + * Iterator for `IListRef`. + * + * What is it? + * =========== + * Currently, a `std::bidirectional_iterator` that wraps the iterator + * types defined for each of the `IListRefTag`. + * + * One should be able to use it, as if it were the unwrapped + * iterators themselves. + + * What does it do? + * ================ + * Similarly to `IListRef`, this is a wrapper class. 
Specifically, it + * wraps each container's `const_iterator` type alias. So, for example, + * given that the container for `IListRefTag::Boxed` is `c10::List`, this + * iterator will wrap a `c10::List::const_iterator`. + * + * [Note: MSVC Iterator Debug] + * =========================== + * MSVC `vector::iterator` implementation (used in the boxed variant) + * makes it so this union's destructor, copy-constructor (assignment), and + * move-constructor (assignment) are implicitly deleted. + * + * Therefore, we need to explicitly define them as needed. Follows a list + * of places where these are needed and their reason: + * + * - `Payload` destructor: + * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is set to 2. + * + * - `IListRefIterator` destructor: + * same as above. However, we need to explicitly call the variant + * destructor explicitly. + * + * - `IListRefIterator` copy-constructor: + * it is deleted only if the macro `_ITERATOR_DEBUG_LEVEL` is different + * than 0. + */ +template +class IListRefIterator { + private: +#define DEFINE_FRIEND_CLASS(TAG, ...) \ + friend class detail::IListRefTagImpl; \ + friend class detail::IListRefTagImplBase< \ + IListRefTag::TAG, \ + T, \ + typename detail::IListRefTagImpl::elem_type>; + TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS) +#undef DEFINE_FRIEND_CLASS + + public: + // C++17 friendly std::iterator implementation + using iterator_category = std::bidirectional_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = T*; + using reference = T&; + + using unboxed_iterator_type = typename detail:: + IListRefTagImpl::list_type::const_iterator; + using boxed_iterator_type = typename detail:: + IListRefTagImpl::list_type::const_iterator; + using materialized_iterator_type = + typename detail::MaterializedIListRef::const_iterator; + + IListRefIterator() : tag_(IListRefTag::None) {} + +#if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL != 0 + // See [Note: MSVC Iterator Debug] + IListRefIterator(const IListRefIterator& iterator) + : tag_(iterator.tag_) { + switch (tag_) { + case IListRefTag::Boxed: + payload_.boxed_iterator = iterator.payload_.boxed_iterator; + break; + case IListRefTag::Unboxed: + payload_.unboxed_iterator = iterator.payload_.unboxed_iterator; + break; + case IListRefTag::Materialized: + payload_.materialized_iterator = iterator.payload_.materialized_iterator; + break; + default: + TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); + } + } +#endif + +#if defined(_MSC_VER) && _ITERATOR_DEBUG_LEVEL == 2 + // See [Note: MSVC Iterator Debug] + ~IListRefIterator() noexcept(false) { + switch (tag_) { + case IListRefTag::Boxed: + payload_.boxed_iterator.~boxed_iterator_type(); + break; + case IListRefTag::Unboxed: + payload_.unboxed_iterator.~unboxed_iterator_type(); + break; + case IListRefTag::Materialized: + payload_.materialized_iterator.~materialized_iterator_type(); + break; + default: + TORCH_INTERNAL_ASSERT(false, "invalid IListRef tag."); + } + } +#endif + + IListRefIterator(boxed_iterator_type boxed) : tag_(IListRefTag::Boxed) { + payload_.boxed_iterator = boxed; + } + + IListRefIterator(unboxed_iterator_type unboxed) : tag_(IListRefTag::Unboxed) { + payload_.unboxed_iterator = unboxed; + } + + IListRefIterator(materialized_iterator_type materialized) : tag_(IListRefTag::Materialized) { + payload_.materialized_iterator = materialized; + } + + detail::IListRefConstRef operator*() const { + TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::iterator_get(this_); }); + } + + 
IListRefIterator& operator++() { + TORCH_ILISTREF_UNWRAP(tag_, { ++this_; }); + return *this; + } + + IListRefIterator operator++(int) { + auto old = *this; + TORCH_ILISTREF_UNWRAP(tag_, { ++this_; }); + return old; + } + + IListRefIterator& operator--() { + TORCH_ILISTREF_UNWRAP(tag_, { --this_; }); + return *this; + } + + IListRefIterator operator--(int) { + auto old = *this; + TORCH_ILISTREF_UNWRAP(tag_, { --this_; }); + return old; + } + + bool operator==(const IListRefIterator& rhs) const { + if (tag_ != rhs.tag_) { + return false; + } + TORCH_ILISTREF_UNWRAP(tag_, { + auto& rhs_it = ImplT::unwrap(rhs); + return this_ == rhs_it; + }); + } + + bool operator!=(const IListRefIterator& rhs) const { + return !(*this == rhs); + } + + private: + union Payload { + boxed_iterator_type boxed_iterator; + unboxed_iterator_type unboxed_iterator; + materialized_iterator_type materialized_iterator; + void* _init_ptr; + Payload() : _init_ptr(nullptr) {} +#if defined(_MSC_VER) + // See [Note: MSVC Iterator Debug] + ~Payload() {} +#endif + }; + + Payload payload_; + IListRefTag tag_; +}; + +/* + * See [Note: IListRef] + */ +template +class IListRef { + private: +#define DEFINE_FRIEND_CLASS(TAG, ...) \ + friend class detail::IListRefTagImpl; \ + friend class detail::IListRefTagImplBase< \ + IListRefTag::TAG, \ + T, \ + typename detail::IListRefTagImpl::elem_type>; + TORCH_ILISTREF_FORALL_TAGS(DEFINE_FRIEND_CLASS) +#undef DEFINE_FRIEND_CLASS + + public: + using unboxed_type = + typename detail::IListRefTagImpl::list_type; + using boxed_type = + typename detail::IListRefTagImpl::list_type; + using materialized_type = + typename detail::MaterializedIListRef; + + using iterator = IListRefIterator; + using const_iterator = IListRefIterator; + using reverse_iterator = std::reverse_iterator; + using value_type = typename iterator::value_type; + + IListRef() : tag_(IListRefTag::None) {} + + IListRef(const boxed_type& boxed) : tag_(IListRefTag::Boxed) { + payload_.boxed = &boxed; + } + + IListRef(const unboxed_type& unboxed) : tag_(IListRefTag::Unboxed) { + payload_.unboxed = unboxed; + } + + IListRef(const std::initializer_list& list) : tag_(IListRefTag::Unboxed) { + payload_.unboxed = at::ArrayRef(list); + } + + template < + typename... UnboxedConstructorArgs, + typename = std::enable_if_t< + std::is_constructible::value>> + IListRef(UnboxedConstructorArgs&&... args) : tag_(IListRefTag::Unboxed) { + payload_.unboxed = unboxed_type(std::forward(args)...); + } + + IListRef(const materialized_type& materialized) : tag_(IListRefTag::Materialized) { + payload_.materialized = &materialized; + } + + size_t size() const { + TORCH_ILISTREF_UNWRAP(tag_, { return this_.size(); }); + } + + bool empty() const { + return size() == 0; + } + + iterator begin() const { + TORCH_ILISTREF_UNWRAP(tag_, { return this_.begin(); }); + } + + iterator end() const { + TORCH_ILISTREF_UNWRAP(tag_, { return this_.end(); }); + } + + detail::IListRefConstRef front() const { + TORCH_ILISTREF_UNWRAP(tag_, { return ImplT::front(this_); }); + } + + /* + * Materializes the `IListRef` into a `std::vector`. 
+ * + * This should be used when one wishes to either: + * + * - iterate over the list more than once: each `IListRefIterator` + * member function call has to go through a switch, introducing + * non-negligible overhead + * + * - randomly access an arbitrary element using `operator[]`: + * same reason as above + */ + detail::MaterializedIListRef materialize() const { + if (isMaterialized()) { + return toMaterialized(); + } + + detail::MaterializedIListRef materialized; + materialized.reserve(size()); + for (const auto& t : *this) { + materialized.emplace_back(t); + } + return materialized; + } + +#define DEFINE_CHECK(TAG, ...) \ + bool is##TAG() const { \ + return tag_ == IListRefTag::TAG; \ + } + TORCH_ILISTREF_FORALL_TAGS(DEFINE_CHECK); +#undef DEFINE_CHECK + + bool isNone() const { + return tag_ == IListRefTag::None; + } + +#define DEFINE_CASTING(TAG, ...) \ + const typename detail::IListRefTagImpl::list_type& \ + to##TAG() const { \ + TORCH_INTERNAL_ASSERT(is##TAG()); \ + return detail::IListRefTagImpl::unwrap(*this); \ + } + TORCH_ILISTREF_FORALL_TAGS(DEFINE_CASTING); +#undef DEFINE_CASTING + + private: + union Payload { + const boxed_type* boxed; + unboxed_type unboxed; + const materialized_type* materialized; + Payload() : boxed(nullptr) {} + }; + + Payload payload_; + IListRefTag tag_; +}; + +} // namespace c10 + +#include diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3efe5e0f7b87b66a1cf22ef1d5fb0f93e2f78494 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/LegacyTypeDispatch.h @@ -0,0 +1,111 @@ +#pragma once + +// The legacy mechanism for dispatching operators in ATen is a Type +// object, which is essentially a giant virtual dispatch table +// for every operation we support dynamically dispatching over. +// +// This has been deprecated in favor of ATenDispatch, and in the future, +// c10 dispatcher. +// TODO: Clean up what remains here + +#include + +namespace at { + +// A RAII, thread local (!) guard that will disable dispatch to variable +// handler. +// +// NOTE [ Treating Variables as non-Variables in type dispatch ] +// +// What exactly does AutoDispatchBelowAutograd do? The short answer is, it causes +// dispatches on ATen functions to go to the non-variable implementation, +// bypassing autograd handling (and also profiling and tracing). +// +// To understand why this guard exists, it's helpful to understand the history +// behind how Variable was implemented. Previously, Variables were implemented +// as a wrapper on Tensors; so the act of processing a Variable involved +// unwrapping the underlying Tensor, and then calling the underlying base +// operation on /that/ operation +// +// However, after the Variable/Tensor merge, there is no concept of unwrapping +// a tensor anymore. If you just call the operation on the same variable +// again inside your VariableType handler, you'll dispatch back to +// VariableType, which is not what we want. +// +// The solution to the above problem is to add `at::AutoDispatchBelowAutograd`, which +// when enabled will cause `legacyTensorType()` and `getType()` to always return +// non-Variable type, even if the tensor being called on is a variable. 
+ +/* Note [AutoDispatchBelowAutograd] + * AutoDispatchBelowAutograd is **INTERNAL ONLY** that it should be used + * for kernel implementations and customized C++ kernels. + * If you are looking for a guard to run workload in inference mode, please use + * c10::InferenceMode RAII which is user facing API. + * In the past AutoDispatchBelowAutograd(or its old version AutoNonVariableTypeMode) + * was used in the user code for inference-only workload, this was under risk of + * producing wrong results silently in some edge cases. For example: + * ``` + * torch::Tensor s = torch::ones({1, 2, 3}).set_requires_grad(true); + * torch::Tensor out = s * s; + * { + * at::AutoDispatchBelowAutograd guard; + * s.add_(1); // Skips version bump on `s`. + * } + * // WRONG GRADIENT! s.grad() are now computed using `s` value after the + * // inplace update. + * out.backward(torch::ones_like(out)); + * ``` + * Users should use `c10::InferenceMode` here so that it'll properly throw an + * error saying "one of the variables needed for gradient computation has be modified." + */ +struct TORCH_API AutoDispatchBelowAutograd { + AutoDispatchBelowAutograd() : + autograd_guard_(c10::autograd_dispatch_keyset) { + } + + // disable all autograd dispatch keys + c10::impl::ExcludeDispatchKeyGuard autograd_guard_; +}; + +// TODO: AutoNonVariableTypeMode should be removed in release 1.10. +struct TORCH_API AutoNonVariableTypeMode { + AutoNonVariableTypeMode(bool enabled = true) : + autograd_guard_(c10::autograd_dispatch_keyset) { + TORCH_WARN_ONCE("AutoNonVariableTypeMode is deprecated and will be removed in 1.10 release. " + "For kernel implementations please use AutoDispatchBelowADInplaceOrView instead, " + "If you are looking for a user facing API to enable running your inference-only " + "workload, please use c10::InferenceMode. Using AutoDispatchBelowADInplaceOrView in user code " + "is under risk of producing silent wrong result in some edge cases. " + "See Note [AutoDispatchBelowAutograd] for more details."); + TORCH_INTERNAL_ASSERT(enabled); + } + + // disable all autograd dispatch keys + c10::impl::ExcludeDispatchKeyGuard autograd_guard_; +}; + +struct TORCH_API AutoDispatchSkipFunctionalize { + AutoDispatchSkipFunctionalize() : + dispatch_key_guard_(c10::DispatchKeySet(c10::DispatchKey::Functionalize)) { + } + c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_; +}; + +/* Note [AutoDispatchBelowADInplaceOrView] + * AutoDispatchBelowADInplaceOrView is equivalent to AutoNonVariableTypeMode + * before we split inplace & view ops out of VariableType kernel. + * Note this guard is used in VariableType kernels for functional ops + * as well as ADInplaceOrView kernels for inplace/view ops to enforce the + * Invariant: + * Once you are in VariableType/ADInplaceOrView kernel for an op, + * you never go back to a kernel on same dispatch key until + * you finish the current op. 
+ */ +struct TORCH_API AutoDispatchBelowADInplaceOrView { + AutoDispatchBelowADInplaceOrView() : + dispatch_key_guard_(c10::autograd_dispatch_keyset_with_ADInplaceOrView) { + } + // disable Autograd & ADInplaceOrView dispatch keys + c10::impl::ExcludeDispatchKeyGuard dispatch_key_guard_; +}; +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h new file mode 100644 index 0000000000000000000000000000000000000000..b6031f0d7798300f36798e6357f278056326f3a0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/QuantizerBase.h @@ -0,0 +1,83 @@ +#pragma once + +#include +#include +#include + +namespace at { + +class Tensor; +struct QTensorImpl; +struct Quantizer; +using ConstQuantizerPtr = const c10::intrusive_ptr&; +using QuantizerPtr = c10::intrusive_ptr; + +/** + * Quantizer is the class for storing all the information + * that's necessary to perform quantize and dequantize + * operation. + * + * We might have different types of quantization schemes and this is + * the base class for all quantizers. + * + * QTensorImpl will hold a pointer to Quantizer so that we can support + * different quantization schemes on Tensor. + * + * For example, the most common quantization scheme, Affine Quantization, + * requires scale and zero_point as parameters, we'll store scale and zero_point + * inside the instance and we can use it to quantize a float Tensor or + * dequantize a quantized Tensor. + * + * When you add new types of leaf Quantizer class, please also + * make sure to add a corresponding QScheme enum since + * they should have one to one mapping. + * + * Note about intrusive_ptr: + * Quantized Tensor holds an intrusive_ptr to Quantizer, and multiple Tensor can + * share the same Quantizer. Quantizer should be immutable. + */ +struct TORCH_API Quantizer : public c10::intrusive_ptr_target { + const ScalarType scalar_type_; + explicit Quantizer(ScalarType scalar_type) : scalar_type_(scalar_type) {} + ~Quantizer() override; + + // Copied from torch/csrc/jit/ir/scope.h + QuantizerPtr intrusive_from_this() { + c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer + // from a raw `this` pointer + // so we need to bump the refcount + // to account for this ownership + return c10::intrusive_ptr::reclaim(this); + } + + /** + * Each concrete Quantizer type should have a unique QScheme type. + */ + virtual QScheme qscheme() const = 0; + + ScalarType scalar_type() const { + return scalar_type_; + } + + /** + * quantize a float Tensor into a quantized Tensor. + */ + virtual Tensor quantize(const Tensor& t) = 0; + + /** + * dequantize a quantized Tensor into a float Tensor. + */ + virtual Tensor dequantize(const Tensor& t) = 0; + + /** + * dequantize a quantized Tensor into a float Tensor, out= variant + */ + virtual Tensor& dequantize_out(Tensor& out, const Tensor& t) = 0; + + /** + * Compare against `other` for equality. 
+ */ + virtual bool equalTo(QuantizerPtr other) const = 0; +}; + +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h new file mode 100644 index 0000000000000000000000000000000000000000..f75342dea76a976579880ad1fa3491f8ca9881fa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/VariableHooksInterface.h @@ -0,0 +1,75 @@ +#pragma once + +#include +#include + +// A little explanation about why this file exists at all. We have +// a few methods on Tensor class which require access to reified access to +// AutogradMeta. In open source, this isn't a big deal: we just access +// torch/csrc/autograd/variable.h from aten/src/ATen/core/Tensor.cpp and +// we can put the definitions inline. This is because everything gets balled +// into a single dynamic library in the end. +// +// However, inside our Facebook internal version of our build system, we +// have a split between aten and torch/csrc. So we cannot simply just +// cross this boundary. "Now wait," you might say, "Why don't we just +// merge the libraries inside Facebook". Well, the problem is that there +// are some downstream applications which are at binary size limit, and +// incorporating all of the extra code from libtorch would push them +// over (admarket/adreview/service:adreviewservice, see also +// https://github.com/pytorch/pytorch/pull/29299) So if you want to do that, +// we have to fix all of the services like this. +// +// I didn't want to block eliminating Tensor-Variable on this work, so I +// had to introduce another dynamic dispatch to get to the variable +// implementations (which live in torch/csrc/autograd/variable.cpp, FYI). +// +// I also considered using our existing dynamic dispatch mechanism, c10 +// dispatcher, to do this. 
However, (1) some of the functions on Tensor +// have weird signatures that are not supported by autograd, and (2) +// see this bug https://github.com/pytorch/pytorch/issues/30102 + +namespace torch { namespace autograd { + +struct Node; + +}} // namespace torch::autograd + +namespace at { +namespace impl { + +struct TORCH_API VariableHooksInterface { + virtual ~VariableHooksInterface() = default; + virtual TensorBase tensor_data(const TensorBase&) const = 0; + virtual TensorBase variable_data(const TensorBase&) const = 0; + virtual const std::shared_ptr& grad_fn(const TensorBase&) const = 0; + virtual unsigned _register_hook( + const TensorBase&, + std::function hook) const = 0; + virtual void remove_hook(const TensorBase&, unsigned pos) const = 0; + virtual bool is_view(const TensorBase&) const = 0; + virtual const TensorBase& base(const TensorBase&) const = 0; + virtual const std::string& name(const TensorBase&) const = 0; + virtual bool is_leaf(const TensorBase&) const = 0; + virtual int64_t output_nr(const TensorBase&) const = 0; + virtual void set_data(const TensorBase&, const TensorBase&) const = 0; + virtual TensorBase data(const TensorBase&) const = 0; + virtual int64_t _version(const TensorBase&) const = 0; + virtual void retain_grad(const TensorBase&) const = 0; + virtual bool retains_grad(const TensorBase&) const = 0; + virtual void _backward(const Tensor&, TensorList, const c10::optional&, c10::optional, bool) const = 0; + virtual void requires_grad_(const TensorBase&, bool) const = 0; + virtual void basic_autograd_not_implemented_fallback(const c10::OperatorHandle& op, c10::DispatchKeySet dispatch_keys, torch::jit::Stack* stack) const = 0; +}; + +TORCH_API void SetVariableHooks(VariableHooksInterface* hooks); +TORCH_API VariableHooksInterface* GetVariableHooks(); +TORCH_API bool HasVariableHooks(); + +struct TORCH_API VariableHooksRegisterer { + explicit VariableHooksRegisterer(VariableHooksInterface* hooks) { + SetVariableHooks(hooks); + } +}; + +}} // namespace at::impl diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dynamic_type.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dynamic_type.h new file mode 100644 index 0000000000000000000000000000000000000000..ecfb6a999bc2e29e41660a819e82276398a9041b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/dynamic_type.h @@ -0,0 +1,239 @@ +#pragma once + +#include +#include +#include + +#include +#include + +namespace c10 { + +using DynamicTypeBits = std::uint32_t; +#define DYNAMIC_TYPE_BIT(x) (1u << x) + +constexpr DynamicTypeBits kDynamicCovariantTypeBit = DYNAMIC_TYPE_BIT(31); +constexpr DynamicTypeBits kDynamicAnyTypeBit = DYNAMIC_TYPE_BIT(30); + +constexpr DynamicTypeBits kDynamicNoneTypeBit = DYNAMIC_TYPE_BIT(1); +constexpr DynamicTypeBits kDynamicIntTypeBit = DYNAMIC_TYPE_BIT(3); +constexpr DynamicTypeBits kDynamicFloatTypeBit = DYNAMIC_TYPE_BIT(4); +constexpr DynamicTypeBits kDynamicComplexTypeBit = DYNAMIC_TYPE_BIT(5); +constexpr DynamicTypeBits kDynamicListTypeBit = DYNAMIC_TYPE_BIT(7); +constexpr DynamicTypeBits kDynamicTupleTypeBit = DYNAMIC_TYPE_BIT(8); +constexpr DynamicTypeBits kDynamicClassTypeBit = DYNAMIC_TYPE_BIT(10); + +#define FORALL_DYNAMIC_TYPES(_) \ + _(Tensor, DYNAMIC_TYPE_BIT(0), 1) \ + _(None, kDynamicNoneTypeBit, 1) \ + _(Bool, DYNAMIC_TYPE_BIT(2), 1) \ + _(Int, kDynamicIntTypeBit, 1) \ + _(Float, kDynamicFloatTypeBit, 1) \ + _(Complex, kDynamicComplexTypeBit, 1) \ + _(Number, \ + (kDynamicIntTypeBit | 
kDynamicFloatTypeBit | kDynamicComplexTypeBit), \ + 1) \ + _(String, DYNAMIC_TYPE_BIT(6), 1) \ + _(List, kDynamicListTypeBit, 0) \ + _(Tuple, (kDynamicTupleTypeBit | kDynamicCovariantTypeBit), 0) \ + _(Dict, DYNAMIC_TYPE_BIT(9), 0) \ + _(Class, kDynamicClassTypeBit, 0) \ + _(Optional, \ + (DYNAMIC_TYPE_BIT(11) | kDynamicNoneTypeBit | kDynamicCovariantTypeBit), \ + 0) \ + _(AnyList, (kDynamicListTypeBit | kDynamicAnyTypeBit), 1) \ + _(AnyTuple, \ + (kDynamicTupleTypeBit | kDynamicCovariantTypeBit | kDynamicAnyTypeBit), \ + 1) \ + _(DeviceObj, DYNAMIC_TYPE_BIT(12), 1) \ + _(StreamObj, DYNAMIC_TYPE_BIT(13), 1) \ + _(Capsule, DYNAMIC_TYPE_BIT(14), 1) \ + _(Generator, DYNAMIC_TYPE_BIT(15), 1) \ + _(Storage, DYNAMIC_TYPE_BIT(16), 1) \ + _(Var, DYNAMIC_TYPE_BIT(17), 0) \ + _(AnyClass, (kDynamicClassTypeBit | kDynamicAnyTypeBit), 1) \ + _(QScheme, DYNAMIC_TYPE_BIT(18), 1) \ + _(Quantizer, DYNAMIC_TYPE_BIT(19), 1) \ + _(AnyEnum, DYNAMIC_TYPE_BIT(20), 1) \ + _(RRef, DYNAMIC_TYPE_BIT(21), 0) \ + _(Future, DYNAMIC_TYPE_BIT(22), 0) \ + _(Await, DYNAMIC_TYPE_BIT(23), 0) \ + _(Any, 0xffffffff, 1) + +#define FORALL_DYNAMIC_TYPES_FAKE(_) \ + _(ScalarType, kDynamicIntTypeBit, 1) \ + _(Layout, kDynamicIntTypeBit, 1) \ + _(SymInt, kDynamicIntTypeBit, 1) \ + _(MemoryFormat, kDynamicIntTypeBit, 1) + +#define FORWARD_DECL_TYPE(NAME, _, __) struct NAME ## Type; + FORALL_DYNAMIC_TYPES(FORWARD_DECL_TYPE) + FORALL_DYNAMIC_TYPES_FAKE(FORWARD_DECL_TYPE) +#undef FORWARD_DECL_TYPE + +class DynamicType; +using DynamicTypePtr = std::shared_ptr; + +/** + * DynamicType is designed as a low dependency type system for TorchScript. The + * existing JIT types are used for both compilation and runtime, which makes + * sense for server contexts because we often compile and run the model in + * the same process, however this doesn't hold for mobile devices where we + * always compiles a model ahead of time, therefore there will be dependencies + * which are not needed, but built with mobile runtime causing binary size + * bloat, by design. Every basic type like Int, Bool or String will bring their + * vtable, typeinfo, constructor, destructor and even more data from their + * specializations for STL types to the binary causing a long tail bloat. + * + * The core problem is about the complexity to implement and maintain a single + * type system for both analysis and execution purposes. Although they should + * have the exactly same semantics, in practice implement a unified abstraction + * adds conceptual and representational overhead for both sides of the world. + * + * To address the issues, DynamicType implements a minimal subset of JIT types + * and uses a generic algorithm to test all subtyping relations. To achieve + * this, we assign each dynamic type a single integer tag to represent its + * semantics. More specifically, a dynamic type is defined as a set of "control + * bits" and "data bits", where control bits describe the special behavior when + * testing a type and data bits map to identity of each nominal type. We use bit + * operations to perform all the tests. + * + * For example, a "covariant bit" is a control bit used to describe if a type + * is covariant, right now the most used one is tuple type, and in addition to + * the control bit, tuple type's data bit is the 8th bit from the LSB. Control + * bits start from MSB and data bits start from LSB. 
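+ *
+ * As a sketch of the bit test this encoding enables (illustrative only; the
+ * full subtyping check also honors control bits and contained types):
+ *
+ * > auto a = static_cast<DynamicTypeBits>(DynamicType::Tag::None);
+ * > auto b = static_cast<DynamicTypeBits>(DynamicType::Tag::Optional);
+ * > bool none_subtypes_optional = (a & b) == a; // true: Optional's tag
+ * >                                             // contains None's data bit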
+ * + * If two types are equal, then they are subtype of each other, also if the bits + * from one type tag is subset of the other tag, it automatically becomes a + * subtype of the other. This simplifies the subtyping logic a lot, and over the + * long term it is possible to adopt this scheme on the server side as well. + * Special cases can be added but they generally should not take too much code + * size. + * + * DynamicType may or may not inherit from c10::Type because it's not the core + * requirement of DynamicType to interface with existing JIT types, but we might + * want to inherit from c10::Type to reduce the migration cost. + */ +class DynamicType : public SharedType { + using ClassTypePtr = std::shared_ptr; + + /** + * A implementation detail to support NamedTuple. + */ + struct LabeledDynamicType { + c10::optional label; + DynamicTypePtr ty; + explicit LabeledDynamicType(DynamicTypePtr t) : ty(std::move(t)) {} + + bool equals(const LabeledDynamicType& other) const; + bool isSubtypeOf(const LabeledDynamicType& other) const; + }; + + public: + // TODO Change Ptr to DynamicTypePtr when all migrations are done. + using Ptr = TypePtr; + using ElementType = DynamicType; + ~DynamicType() override; + + struct Arguments { + Arguments() = default; + Arguments(c10::ArrayRef); + Arguments(const std::vector&, c10::ArrayRef); + std::vector elems; + }; + + enum class Tag : DynamicTypeBits { +#define DYNAMIC_TYPE_ITEM(NAME, VAL, _) NAME = VAL, + FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_ITEM) + FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_ITEM) +#undef DYNAMIC_TYPE_ITEM + }; + + bool equals(const Type& rhs) const override; + bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const override; + std::string str() const override; + static const TypeKind Kind = TypeKind::DynamicType; + static TORCH_API DynamicTypePtr create(Type& ty); + + explicit DynamicType(Tag, Arguments); + explicit DynamicType(Tag, c10::string_view, Arguments); + + TypePtr containedType(size_t) const override; + size_t containedTypeSize() const override; + Tag tag() const { + return tag_; + } + const c10::optional& name() const { + return name_; + } + const Arguments& arguments() const { + return arguments_; + } + TORCH_API TypeKind dynamicKind() const; + + // Should be used only on the server side to restore static type information. 
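+  // For instance (sketch): for a DynamicTypePtr dyn that was created from a
+  // static ListType, dyn->fallback() is expected to recover an equivalent
+  // static JIT TypePtr:
+  // > TypePtr static_ty = dyn->fallback();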
+#ifndef C10_MOBILE + TORCH_API +#endif + TypePtr fallback() const; + + private: + bool symmetric() const override { + return false; + } + friend struct Type; + static std::shared_ptr create(const Type& ty); + DynamicType(const Type& other); + bool equals(const DynamicType& other) const; + + template + bool compareArguments(const DynamicType& other, F&& f) const { + if (arguments_.elems.size() != other.arguments_.elems.size()) { + return false; + } + for (size_t i = 0; i < arguments_.elems.size(); i++) { + if (!f(arguments_.elems[i], other.arguments_.elems[i])) { + return false; + } + } + return true; + } + + Tag tag_; + c10::optional name_; + union { + Arguments arguments_; + ClassTypePtr class_; + }; +}; + +template +struct DynamicTypeTrait { + C10_NOINLINE static auto tagValue() { + TORCH_CHECK(false); + return DynamicType::Tag::Any; + } +}; + +namespace detail { +C10_NOINLINE DynamicTypePtr makeBaseType(DynamicType::Tag tag); +} + +#define DYNAMIC_TYPE_TAG_VALUE(NAME, _, IS_BASE_TYPE) \ + template <> \ + struct TORCH_API DynamicTypeTrait { \ + C10_ERASE static auto tagValue() { \ + return DynamicType::Tag::NAME; \ + } \ + static constexpr bool isBaseType = IS_BASE_TYPE; \ + template \ + static std::enable_if_t getBaseType() { \ + static auto type = detail::makeBaseType(tagValue()); \ + return type; \ + } \ + }; // namespace c10 +FORALL_DYNAMIC_TYPES(DYNAMIC_TYPE_TAG_VALUE) +FORALL_DYNAMIC_TYPES_FAKE(DYNAMIC_TYPE_TAG_VALUE) +#undef DYNAMIC_TYPE_TAG_VALUE + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h new file mode 100644 index 0000000000000000000000000000000000000000..bf4909aaefec7baa64e1f2d4b8b3a06743dff253 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/jit_type_base.h @@ -0,0 +1,719 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace c10 { + +#define C10_FORALL_TYPES(_) \ + _(AnyType) \ + _(EnumType) \ + _(AnyEnumType) \ + _(TensorType) \ + _(StorageType) \ + _(TupleType) \ + _(ListType) \ + _(DictType) \ + _(NumberType) \ + _(FloatType) \ + _(ComplexType) \ + _(FutureType) \ + _(AwaitType) \ + _(RRefType) \ + _(IntType) \ + _(NoneType) \ + _(StringType) \ + _(GeneratorType) \ + _(QuantizerType) \ + _(BoolType) \ + _(OptionalType) \ + _(VarType) \ + _(DeviceObjType) \ + _(StreamObjType) \ + _(FunctionType) \ + _(ClassType) \ + _(PyObjectType) \ + _(CapsuleType) \ + _(InterfaceType) \ + _(QSchemeType) \ + _(ScalarTypeType) \ + _(LayoutType) \ + _(MemoryFormatType) \ + _(AnyListType) \ + _(AnyTupleType) \ + _(AnyClassType) \ + _(SymIntType) \ + _(SymFloatType) \ + _(SymBoolType) \ + _(UnionType) \ + _(DynamicType) + +enum class TypeKind { +#define DEFINE_TYPE(T) T, + C10_FORALL_TYPES(DEFINE_TYPE) +#undef DEFINE_TYPE +}; + +TORCH_API const char* typeKindToString(TypeKind kind); + +struct Type; +struct SharedType; + +// Use this to customize how a Type is printed using `annotation_str()`. If +// c10::nullopt is returned, `annotation_str()` falls through to its default +// implementation. 
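+// For example (sketch), a printer that renames every class type and defers to
+// the default for everything else:
+//
+// > TypePrinter printer = [](const Type& t) -> c10::optional<std::string> {
+// >   if (t.kind() == TypeKind::ClassType) {
+// >     return std::string("AnyObject");
+// >   }
+// >   return c10::nullopt; // fall through to the default annotation_str()
+// > };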
+using TypePrinter = std::function(const Type&)>; + +namespace detail { +template +struct IsSingletonType : public std::integral_constant {}; +} // namespace detail +#define TORCH_DECLARE_SINGLETON(Type) \ + struct Type; \ + namespace detail { \ + template <> struct IsSingletonType : public std::integral_constant {}; \ + } + +TORCH_DECLARE_SINGLETON(AnyType); +TORCH_DECLARE_SINGLETON(AnyEnumType); +TORCH_DECLARE_SINGLETON(NumberType); +TORCH_DECLARE_SINGLETON(FloatType); +TORCH_DECLARE_SINGLETON(ComplexType); +TORCH_DECLARE_SINGLETON(IntType); +TORCH_DECLARE_SINGLETON(BoolType); +TORCH_DECLARE_SINGLETON(StringType); +TORCH_DECLARE_SINGLETON(StorageType); +TORCH_DECLARE_SINGLETON(NoneType); +TORCH_DECLARE_SINGLETON(GeneratorType); +TORCH_DECLARE_SINGLETON(QuantizerType); +TORCH_DECLARE_SINGLETON(QSchemeType); +TORCH_DECLARE_SINGLETON(DeviceObjType); +TORCH_DECLARE_SINGLETON(StreamObjType); +TORCH_DECLARE_SINGLETON(CapsuleType); +TORCH_DECLARE_SINGLETON(PyObjectType); +TORCH_DECLARE_SINGLETON(ScalarTypeType); +TORCH_DECLARE_SINGLETON(LayoutType); +TORCH_DECLARE_SINGLETON(MemoryFormatType); +TORCH_DECLARE_SINGLETON(AnyListType); +TORCH_DECLARE_SINGLETON(AnyTupleType); +TORCH_DECLARE_SINGLETON(AnyClassType); + +namespace detail { +template +struct CastReturnType { + using type = std::shared_ptr; +}; + +template +struct CastReturnType::value>::type> { + using type = SingletonTypePtr; +}; + +template +struct CastConstReturnType { + using type = std::shared_ptr; +}; + +template +struct CastConstReturnType::value>::type> { + using type = SingletonTypePtr; +}; + +template +struct as_shared_type { + using type = SharedType*; +}; + +template +struct as_shared_type { + using type = const SharedType *; +}; +} // namespace detail + +struct TORCH_API Type { + friend TORCH_API bool operator==(const Type& lhs, const Type& rhs); + private: + TypeKind kind_; + + protected: + Type(TypeKind kind) : kind_(kind) {} + + Type(const Type&) = default; + Type& operator=(const Type&) = default; + Type(Type&&) noexcept = default; + Type& operator=(Type&&) noexcept = default; + + virtual std::string annotation_str_impl(TypePrinter /*printer*/) const { + return str(); + } + // a == b + virtual bool equals(const Type& rhs) const = 0; + // a == b <=> b == a + virtual bool symmetric() const { + return true; + } + + public: + template + class SingletonOrSharedTypePtr { + public: + using element_type = typename std::shared_ptr::element_type; + + SingletonOrSharedTypePtr() = default; + + /* implicit */ SingletonOrSharedTypePtr(std::shared_ptr x) + : repr_(std::move(x)) {} + + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(std::shared_ptr x) + : repr_(std::move(x)) {} + + /* implicit */ SingletonOrSharedTypePtr(std::nullptr_t) + : repr_(nullptr) {} + + /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr p) + : repr_(p) {} + + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(SingletonTypePtr p) + : repr_(SingletonTypePtr(p.get())) {} + + + // We need to support construction from T* for pybind. The problem + // is that it's not clear if we are supposed to be taking shared + // ownership or not. + // + // Case 1: if T is known statically to derive from SharedType, we should use + // shared_from_this() and take shared_ownership. + // + // Case 2: if T is exactly Type, we need to do a dynamic_cast to + // check if it's a SharedType and do the right thing. + // + // Case 3: Otherwise, T is not a SharedType. (debug-check this + // assumption!) Use a singleton pointer. 
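+    // Concretely (sketch): a TupleType* derives from SharedType, so it goes
+    // through shared_from_this() (Case 1); a plain Type* is resolved with a
+    // dynamic_cast at runtime (Case 2); a singleton such as IntType* is
+    // stored as a raw singleton pointer (Case 3).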
+ + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(T* p) : SingletonOrSharedTypePtr(static_cast::type>(p)->shared_from_this()) {} + + template ::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(T* p) { + if (auto* shared_p = dynamic_cast::type>(p)) { + repr_ = Repr(shared_p->shared_from_this()); + } else { + repr_ = Repr(p); + } + } + + template ::value && !std::is_base_of::value, bool> = true> + /* implicit */ SingletonOrSharedTypePtr(T* p) + : repr_(p) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(dynamic_cast::type>(p) == nullptr); + } + + SingletonOrSharedTypePtr(const SingletonOrSharedTypePtr&) = default; + SingletonOrSharedTypePtr(SingletonOrSharedTypePtr&&) noexcept = default; + SingletonOrSharedTypePtr& operator=(const SingletonOrSharedTypePtr&) = default; + SingletonOrSharedTypePtr& operator=(SingletonOrSharedTypePtr&&) noexcept = default; + + T* get() const { + return repr_.isSharedAndNonNull() ? repr_.shared_.repr_.get() : static_cast(repr_.rawRepr().first); + } + + operator bool() const { + return repr_.isNonNull(); + } + + bool operator==(std::nullptr_t) const { + return !repr_.isNonNull(); + } + + bool operator!=(std::nullptr_t) const { + return repr_.isNonNull(); + } + + template , void>::value, bool> = true> + U& operator*() const { + return *get(); + } + + T* operator->() const { + return get(); + } + + private: + // NOTE: SharedPtrWrapper exists to work around a baffling bug in + // nvcc; see comment in destroy() below. + struct SharedPtrWrapper { + SharedPtrWrapper(std::shared_ptr &&x) + : repr_(std::move(x)) {} + std::shared_ptr repr_; + }; + union Repr { + Repr() : Repr(nullptr) {} + + explicit Repr(std::shared_ptr x) + : shared_(std::move(x)) {} + + explicit Repr(std::nullptr_t) + : singletonRepr_(nullptr) {} + + explicit Repr(SingletonTypePtr p) + : singletonRepr_(p.get()) {} + + ~Repr() { + destroy(); + } + + // NOTE: the only non-UB way to access our null state is through + // rawRepr(), because our copy operation doesn't preserve which + // union member is active for null pointers. 
+ Repr(const Repr& rhs) { + if (rhs.isSharedAndNonNull()) { + new (&shared_) SharedPtrWrapper(rhs.shared_); + } else { + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + } + + Repr(Repr&& rhs) noexcept { + if (rhs.isSharedAndNonNull()) { + new (&shared_) SharedPtrWrapper(std::move(rhs.shared_)); + } else { + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.singletonRepr_.unused_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + } + + Repr& operator=(const Repr& rhs) { + if (&rhs == this) { + return *this; + } + if (rhs.isSharedAndNonNull()) { + if (isSharedAndNonNull()) { + shared_ = rhs.shared_; + } else { + new (&shared_) SharedPtrWrapper(rhs.shared_); + } + } else { + if (isSharedAndNonNull()) { + destroy(); + } + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + return *this; + } + + Repr& operator=(Repr&& rhs) noexcept { + if (&rhs == this) { + return *this; + } + if (rhs.isSharedAndNonNull()) { + if (isSharedAndNonNull()) { + shared_ = std::move(rhs.shared_); + } else { + new (&shared_) SharedPtrWrapper(std::move(rhs.shared_)); + } + } else { + if (isSharedAndNonNull()) { + destroy(); + } + singletonRepr_.singleton_ = static_cast(rhs.rawRepr().first); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(rhs.rawRepr().nullIfSingleton_ == nullptr); + singletonRepr_.unused_ = nullptr; + } + return *this; + } + + SharedPtrWrapper shared_; + + struct SingletonRepr { + explicit SingletonRepr(T* s) : singleton_(s) {} + T* singleton_; + void* unused_ = nullptr; + } singletonRepr_; + struct RawRepr { + void* first; + void* nullIfSingleton_; + }; + + // It is UB to read the singleton part of Repr if it was + // constructed as a shared_ptr and vice versa, but memcpying out + // the representation is always OK, so here's an accessor to obey + // the letter of the law. + RawRepr rawRepr() const { + RawRepr repr{}; + memcpy(&repr, reinterpret_cast(this), sizeof(RawRepr)); + return repr; + } + + bool isNonNull() const { + auto repr = rawRepr(); + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(repr.nullIfSingleton_ == nullptr || repr.first != nullptr); + return repr.first != nullptr; + } + + bool isSharedAndNonNull() const { + return rawRepr().nullIfSingleton_ != nullptr; + } + + private: + void destroy() { + if (isSharedAndNonNull()) { + // Without SharedPtrWrapper, this line would read + // `shared_.~shared_ptr()` and nvcc would complain with + // "error: expected primary-expression before '>' token" + // referring to the "t" in "shared_ptr". SharedPtrWrapper + // exists to work around this compiler bug. + shared_.~SharedPtrWrapper(); + } + } + } repr_; + }; + + using TypePtr = SingletonOrSharedTypePtr; + using Ptr = TypePtr; + using ElementType = Type; + + // subtyping relation. By default, we return true for the case + // when the type is exactly equal or if this <: T where rhs = Optional[T] + + // if this returns false and the why_not stream is non-null, it contains + // additional details that describe why this is not a subtype of 'rhs'. + // This additional information should only contain details that are not + // obvious from the annotation_str() that describes the type. For instance it + // is clear that `int <: str` is false but not clear why `Foo <: InterfaceBar` + // might be false. 
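+  //
+  // Typical call pattern (sketch):
+  // > std::ostringstream why_not;
+  // > if (!ty->isSubtypeOfExt(*other, &why_not)) {
+  // >   TORCH_CHECK(false, "type mismatch: ", why_not.str());
+  // > }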
+ virtual bool isSubtypeOfExt(const Type& rhs, std::ostream* why_not) const; + virtual bool is_module() const; + bool isSubtypeOf(const Type& rhs) const { + return isSubtypeOfExt(rhs, nullptr); + } + // Compatibility shims to accommodate existing code that passes shared_ptrs + // around. Ideally, we would just delete this, but it should be harmless. + template + typename std::enable_if::value, bool>::type + isSubtypeOf(const std::shared_ptr& rhs) const { + return isSubtypeOf(*rhs); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOf(const SingletonOrSharedTypePtr& rhs) const { + return isSubtypeOf(*rhs); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOf(SingletonTypePtr rhs) const { + return isSubtypeOf(*rhs); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOfExt(const SingletonOrSharedTypePtr& rhs, std::ostream* why_not) const { + return isSubtypeOfExt(*rhs, why_not); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOfExt(const std::shared_ptr& rhs, std::ostream* why_not) const { + return isSubtypeOfExt(*rhs, why_not); + } + + template + typename std::enable_if::value, bool>::type + isSubtypeOfExt(SingletonTypePtr rhs, std::ostream* why_not) const { + return isSubtypeOfExt(*rhs, why_not); + } + + // How this type will appear in FunctionSchema declarations + virtual std::string str() const = 0; + + // How this type will appear as if it were a type annotation in Python + // which is sometimes different than how it appears in declarations (e.g. + // int[] vs List[int]) + // + // Takes a custom printer that users can pass in to customize the output of + // this method. + std::string annotation_str(TypePrinter printer) const { + if (printer) { + // the printer can return nullopt to fall through to the default impl + if (auto renamed = printer(*this)) { + return *renamed; + } + } + return annotation_str_impl(std::move(printer)); + } + std::string annotation_str() const { + // Overload instead of define a default value for `printer` to help + // debuggers out. + return annotation_str(nullptr); + } + + // Returns a human readable string that includes additional information like + // "type is inferred rather than explicitly defined" to help construct more + // user-friendly messages. + virtual std::string repr_str() const { + return annotation_str(); + } + + TypeKind kind() const { + return kind_; + } + + virtual bool isUnionType() const { + return false; + } + + virtual bool requires_grad() const { + for (const auto& ct : containedTypes()) { + if (ct->requires_grad()) { + return true; + } + } + return false; + } + + // Dynamically cast this object to the subclass indicated by the + // template variable, returning nullptr if the cast is invalid. 
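+  //
+  // e.g. (sketch):
+  // > if (auto tuple_ty = ty->cast<TupleType>()) {
+  // >   // tuple_ty is a std::shared_ptr<TupleType>
+  // > } else if (auto int_ty = ty->cast<IntType>()) {
+  // >   // int_ty is a SingletonTypePtr<IntType>
+  // > }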
+ template ::value, bool> = true> + typename detail::CastReturnType::type cast() { + if (T::Kind == kind()) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return nullptr; + } + template ::value, bool> = true> + typename detail::CastReturnType::type cast() { + if (T::Kind == kind()) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get()); + return typename detail::CastReturnType::type(static_cast(this)); + } + return nullptr; + } + template ::value, bool> = true> + typename detail::CastConstReturnType::type cast() const { + if (T::Kind == kind()) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return nullptr; + } + template ::value, bool> = true> + typename detail::CastConstReturnType::type cast() const { + if (T::Kind == kind()) { + TORCH_INTERNAL_ASSERT_DEBUG_ONLY(this == T::get().get()); + return typename detail::CastConstReturnType::type(static_cast(this)); + } + return nullptr; + } + template + T* castRaw() { + if (T::Kind == kind()) { + return static_cast(this); + } + return nullptr; + } + template + const T* castRaw() const { + if (T::Kind == kind()) { + return static_cast(this); + } + return nullptr; + } + template + auto expect() { + auto r = cast(); + AT_ASSERT(r); + return r; + } + template + auto expect() const { + auto r = cast(); + AT_ASSERT(r); + return r; + } + template + T& expectRef() { + auto* r = castRaw(); + AT_ASSERT(r); + return *r; + } + template + const T& expectRef() const { + auto* r = castRaw(); + AT_ASSERT(r); + return *r; + } + virtual ~Type() = default; + virtual bool hasFreeVariables() const { + return false; + } + // list of types this type contains, e.g. for a List then element type of a + // list for a tuple, the types of the tuple elements + virtual at::ArrayRef containedTypes() const { + return {}; + } + virtual TypePtr containedType(size_t i) const { + return containedTypes().at(i); + } + virtual size_t containedTypeSize() const { + return containedTypes().size(); + } + // create a new version of this type, replacing its contained types with + // contained_types + TypePtr withContained(std::vector contained_types); + // per-type constructor, you only need to override this if the + // containedTypes() is not empty + virtual TypePtr createWithContained( + std::vector /*contained_types*/) const { + AT_ERROR( + "type with contained types did not overload createWithContained: ", + str()); + } + +}; + +template +using SingletonOrSharedTypePtr = Type::SingletonOrSharedTypePtr; + + +template +bool operator==(const SingletonOrSharedTypePtr& x, const SingletonOrSharedTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const SingletonOrSharedTypePtr& x, const std::shared_ptr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const std::shared_ptr& x, const SingletonOrSharedTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const SingletonOrSharedTypePtr& x, const SingletonTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator==(const SingletonTypePtr& x, const SingletonOrSharedTypePtr& y) { + return (void*)x.get() == (void*)y.get(); +} + +template +bool operator!=(const SingletonOrSharedTypePtr& x, const SingletonOrSharedTypePtr& y) { + return !(x == y); +} + +template +bool operator!=(const SingletonOrSharedTypePtr& x, const std::shared_ptr& y) { + return !(x == y); +} + +template +bool operator!=(const std::shared_ptr& x, const SingletonOrSharedTypePtr& 
y) { + return !(x == y); +} + +template +bool operator!=(const SingletonOrSharedTypePtr& x, const SingletonTypePtr& y) { + return !(x == y); +} + +template +bool operator!=(const SingletonTypePtr& x, const SingletonOrSharedTypePtr& y) { + return !(x == y); +} + +using TypePtr = SingletonOrSharedTypePtr; +using ConstTypePtr = SingletonOrSharedTypePtr; + +// Explicitly enable MaybeOwned>, rather than allowing +// MaybeOwned to be used for any type right away. +template +struct MaybeOwnedTraits> + : public MaybeOwnedTraitsGenericImpl> {}; + +// Base class for Types that are guaranteed to be owned by std::shared_ptr. +struct TORCH_API SharedType : public Type, public std::enable_shared_from_this { + using Type::Type; +}; + +inline TypePtr Type::withContained(std::vector contained_types) { + auto current_contained = containedTypes(); + // Types with no contained_types don't need this call. Check before calling! + // + // (We can't support this efficiently because types without + // contained types may be singletons, in which case + // shared_from_this will crash; we would have to provide a virtual + // typeptr_from_this or isSingleton.) + TORCH_INTERNAL_ASSERT(!current_contained.empty() && current_contained.size() == contained_types.size()); + if (current_contained.equals(contained_types)) { + return std::static_pointer_cast(static_cast(this)->shared_from_this()); + } + return createWithContained(std::move(contained_types)); +} + + +TORCH_API inline bool operator==(const Type& lhs, const Type& rhs) { + if (C10_UNLIKELY(!rhs.symmetric())) { + return rhs.equals(lhs); + } + return lhs.equals(rhs); +} + +struct NamedType; +using NamedTypePtr = std::shared_ptr; +using ConstNamedTypePtr = std::shared_ptr; + +struct TORCH_API NamedType : public SharedType { + NamedType(TypeKind tk, c10::optional name) + : SharedType(tk), name_(std::move(name)) { + TORCH_INTERNAL_ASSERT( + tk == TypeKind::TupleType || tk == TypeKind::FunctionType || + tk == TypeKind::ClassType || tk == TypeKind::InterfaceType || + tk == TypeKind::EnumType, + "If you add a new kind of NamedType, ", + "please update the cast specialization and this assert"); + } + + // Fully qualified name of type + // Looks like: "foo.bar.Baz". + const c10::optional& name() const { + return name_; + } + + private: + c10::optional name_; +}; + +} // namespace c10 + +namespace std { +template +struct hash> { + size_t operator()(const c10::SingletonOrSharedTypePtr& x) const { + return std::hash()(x.get()); + } +}; +} // namespace std diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h new file mode 100644 index 0000000000000000000000000000000000000000..f17917500e3b4ba34fb310d849c7529295f88f6e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/adaption.h @@ -0,0 +1,83 @@ +#pragma once + +#include +#include +#include +#include + +/* + * [Note: hacky wrapper removal for optional tensor] + * + * The kernel implementation takes an optional tensor marked in the schema as + * Tensor? but the C++ function takes Tensor instead of the optional + * expected by the dispatcher. 
+ * + * To remove the hacky wrapper, the C++ function is changed to take + * optional and unwrap the Tensor value at the beginning of + * the function, e.g.: + * > c10::MaybeOwned weight_maybe_owned = + * > at::borrow_from_optional_tensor(weight_opt); + * > const Tensor& weight = *weight_maybe_owned; + * + * We may want to make the kernel handle optional directly without + * going through the creation of a default-constructed Tensor in + * at::borrow_from_optional_tensor. + */ + +/* + * [Note: hacky wrapper removal for TensorOptions] + * + * The kernel implementation takes a TensorOptions argument but the dispatcher + * expects separate arguments for dtype, layout, device, pin_memory. + * + * To remove the hacky wrapper, the kernel implementation is changed to take + * the 4 arguments (dtype, layout, device, pin_memory), and assemble the + * TensorOptions value at the beginning of the function, e.g.: + * > TensorOptions options = TensorOptions().dtype(dtype).layout(layout) + * > .device(device).pinned_memory(pin_memory); + * + * We may want make the kernel handle these parameters directly without going + * through the creation of a TensorOptions value. + */ + +namespace c10 { +namespace impl { + +TORCH_API void common_device_check_failure(Device common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName); + +inline void check_and_update_common_device(optional& common_device, const at::Tensor& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) { + // TODO: Remove this once the following issue is addressed: + // https://github.com/pytorch/pytorch/issues/57380 + if (!tensor.defined()) { + return; + } + + if (!common_device.has_value()) { + common_device = tensor.device(); + return; + } + + if (C10_UNLIKELY(common_device != tensor.device())) { + common_device_check_failure(*common_device, tensor, methodName, argName); + } +} + +inline void check_and_update_common_device(optional& common_device, const optional& tensor, at::CheckedFrom methodName, at::CheckedFrom argName) { + if (tensor.has_value()) { + check_and_update_common_device(common_device, tensor.value(), methodName, argName); + } +} + +inline void check_and_update_common_device(optional& common_device, at::ITensorListRef tensors, at::CheckedFrom methodName, at::CheckedFrom argName) { + for (const auto& tensor : tensors) { + check_and_update_common_device(common_device, tensor, methodName, argName); + } +} + +inline void check_and_update_common_device(optional& common_device, const List>& tensors, at::CheckedFrom methodName, at::CheckedFrom argName) { + for (const auto& tensor : tensors) { + check_and_update_common_device(common_device, tensor, methodName, argName); + } +} +} // namespace impl +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h new file mode 100644 index 0000000000000000000000000000000000000000..6e77c565388156db9f14ff607b4c1bc42240d278 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/op_registration/op_allowlist.h @@ -0,0 +1,199 @@ +#pragma once + +// TODO: unify to C10_MOBILE. In theory this header could be used in OSS. +#ifdef TEMPLATE_SELECTIVE_BUILD +#include +#endif + +/** + * This header implements functionality to build PyTorch with only a certain + * set of operators (+ dependencies) included. 
+ * + * - Build with -DTORCH_OPERATOR_WHITELIST="aten::add;aten::sub" and only these + * two ops will be included in your build. The allowlist records operators + * only, no overloads; if you include aten::add, all overloads of aten::add + * will be included. + * + * Internally, this is done by removing the operator registration calls + * using compile time programming, and the linker will then prune all + * operator functions that weren't registered. + * See Note [Selective build] for more details + * + * WARNING: The allowlist mechanism doesn't work for all ways you could go about + * registering an operator. If the dispatch key / operator name is not + * sufficiently obvious at compile time, then the allowlisting mechanism + * will fail (and the operator will be included in the binary anyway). + */ + +#include +#include +#include + + +#if defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) +#include +#endif + +namespace c10 { + +namespace impl { + +constexpr bool allowlist_contains(string_view allowlist, string_view item); // Forward Declare + +/** + * In selective build mode returns true/false depending on whether a build + * feature is available or not. + * + * In instrumenting mode (tracing mode), always returns true, and doesn't + * trigger any side effects. + */ +constexpr bool is_build_feature_available(const char* name) { +#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) + // Selective Build mode. +#if !defined(TORCH_BUILD_FEATURE_ALLOWLIST) + (void)name; + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_BUILD_FEATURE_ALLOWLIST), + name); +#endif + +#else + // Instrumenting mode. + (void)name; + return true; +#endif +} + +[[noreturn]] void build_feature_required_feature_not_available(const char* feature); + +/** + * Use BUILD_FEATURE_REQUIRED macro in user-code. + * + * In selective build mode becomes a no-op if the build feature passed + * in is available. If not available, throws an exception (c10::Error). + * The compiler is able to perform dead code elimination for code + * following this method if the build feature is not available. + * + * In instrumenting mode (tracing mode), registers (as a side effect) + * the presence of this specific build feature being triggered. + */ +#if !defined(ENABLE_RECORD_KERNEL_FUNCTION_DTYPE) // selective build mode + +#if defined(TORCH_BUILD_FEATURE_ALLOWLIST) +#define BUILD_FEATURE_REQUIRED(NAME) \ + if (!c10::impl::is_build_feature_available(NAME)) { \ + ::c10::impl::build_feature_required_feature_not_available(NAME); \ + } +#else // Everything trivially selected +#define BUILD_FEATURE_REQUIRED(NAME) + +#endif + +#else // trace mode +#define BUILD_FEATURE_REQUIRED(NAME) \ + RECORD_FUNCTION_WITH_SCOPE( \ + at::RecordScope::BUILD_FEATURE, \ + std::string(NAME), \ + {}); +#endif + +// Use this macro, and not is_build_feature_available +#define BUILD_FEATURE_AVAILABLE(NAME) ::c10::impl::is_build_feature_available(NAME) + +// returns true iff allowlist contains item +// allowlist_contains("a;bc;d", "bc") == true +constexpr bool allowlist_contains(string_view allowlist, string_view item) { + //Choose a really big value for next so that if something goes wrong + //this code will blow up in a hopefully detectable way. 
+ size_t next = std::numeric_limits::max(); + for (size_t cur = 0; cur <= allowlist.size(); cur = next) { + next = allowlist.find(';', cur); + if (next != string_view::npos) { + if (allowlist.substr(cur, next - cur).compare(item) == 0) { + return true; + } + next++; + } else { + if (allowlist.substr(cur).compare(item) == 0) { + return true; + } + break; + } + } + return false; +} + +// Returns true iff the given op name is on the allowlist +// and should be registered +constexpr bool op_allowlist_check(string_view op_name) { + assert(op_name.find("::") != string_view::npos); + // Use assert() instead of throw() due to a gcc bug. See: + // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function + // https://github.com/fmtlib/fmt/issues/682 + assert(op_name.find("(") == string_view::npos); +#if !defined(TORCH_OPERATOR_WHITELIST) + // If the TORCH_OPERATOR_WHITELIST parameter is not defined, + // all ops are to be registered + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_OPERATOR_WHITELIST), + // This function is majorly used for mobile selective build with + // root operators, where the overload is included in the allowlist. + op_name); + // // Strip overload name (as allowlist doesn't contain overloads) + // // Another function based on this may be added when there's usage + // // on op names without overload. + // OperatorNameView::parse(op_name).name); +#endif +} + +// Returns true iff the given schema string is on the allowlist +// and should be registered +constexpr bool schema_allowlist_check(string_view schema) { +#if defined(TORCH_FORCE_SCHEMA_REGISTRATION) + return true; +#else + return op_allowlist_check(schema.substr(0, schema.find("("))); +#endif +} + +// Returns true iff the given custom class name is on the allowlist +// and should be registered +constexpr bool custom_class_allowlist_check(string_view custom_class_name) { +#if !defined(TORCH_CUSTOM_CLASS_ALLOWLIST) + // If the TORCH_CUSTOM_CLASS_ALLOWLIST parameter is not defined, + // all custom classes are to be registered + (void)custom_class_name; + return true; +#else + return allowlist_contains( + C10_STRINGIZE(TORCH_CUSTOM_CLASS_ALLOWLIST), + custom_class_name); +#endif +} + +// schema_allowlist_check() implicitly depends on a macro, TORCH_OPERATOR_WHITELIST. +// Add this API to pass arbitrary allowlist. +constexpr bool op_allowlist_contains_name_in_schema(string_view allowlist, string_view schema) { + return allowlist_contains(allowlist, schema.substr(0, schema.find("("))); +} + +// Returns true iff the given dispatch key is on the allowlist +// and should be registered. When we turn this on, the list of valid +// mobile dispatch keys is hard coded (but you need to make sure +// that you have the correct set of dispatch keys for this). +constexpr bool dispatch_key_allowlist_check(DispatchKey /*k*/) { +#ifdef C10_MOBILE + return true; + // Disabled for now: to be enabled later! 
+  // return k == DispatchKey::CPU || k == DispatchKey::Vulkan || k == DispatchKey::QuantizedCPU || k == DispatchKey::BackendSelect || k == DispatchKey::CatchAll;
+#else
+  return true;
+#endif
+}
+
+} // namespace impl
+} // namespace c10
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h
new file mode 100644
index 0000000000000000000000000000000000000000..cefb29c08ddc6a6b8280d31ca0ff53c912ab9fd3
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/core/rref_interface.h
@@ -0,0 +1,40 @@
+#pragma once
+
+#include <ATen/core/jit_type_base.h>
+#include <c10/util/intrusive_ptr.h>
+
+namespace c10 {
+
+struct Type;
+using worker_id_t = int16_t;
+
+// This abstract class contains only user-facing APIs, and will be shared
+// between jit and distributed to implement TorchScript support.
+class C10_EXPORT RRefInterface : public c10::intrusive_ptr_target {
+ public:
+  RRefInterface() = default;
+  // RRef is made NOT copyable and NOT movable to prevent messing up
+  // reference counting.
+  RRefInterface(const RRefInterface& other) = delete;
+  RRefInterface(RRefInterface&& other) = delete;
+  RRefInterface& operator=(RRefInterface&& other) = delete;
+
+  ~RRefInterface() override = default;
+
+  // Returns the worker id of the owner
+  virtual worker_id_t owner() const = 0;
+
+  // Returns the worker name of the owner
+  virtual std::string ownerName() const = 0;
+
+  // Returns true if this is the ``OwnerRRef``
+  virtual bool isOwner() const = 0;
+
+  // Returns true if this is an ``OwnerRRef`` or if this ``UserRRef`` has been
+  // confirmed by its owner.
+  virtual bool confirmedByOwner() const = 0;
+
+  virtual const TypePtr type() const = 0;
+};
+
+}
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h
new file mode 100644
index 0000000000000000000000000000000000000000..1eb0e245efd20968bff6c15cac65eedabdd28636
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/cuda/detail/DeviceThreadHandles.h
@@ -0,0 +1,151 @@
+// Some stateful GPU libraries, such as cuDNN and cuBLAS, use handles to store
+// state. These handles are tied to a device, and these libraries require or
+// recommend not sharing handles across host threads.
+//
+// These libraries recommend using one handle per host thread. We may not want
+// to do this because threads are relatively lightweight, while creating and
+// destroying handles is expensive (destroying a handle causes
+// synchronization). DataParallel, for example, creates new threads for each
+// forward pass.
+//
+// This file implements a handle pool mechanism. The handle pool returns
+// handles on demand as threads request them. If all existing handles in the
+// pool are in use, it creates a new one. As threads terminate, they release
+// handles back into the pool. In this way, the handle pool never creates more
+// handles than the high-water mark of active threads, so it's efficient with
+// DataParallel.
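+//
+// Usage sketch (hypothetical names; assumes the pool's template parameters
+// are <Handle_t, Create, Destroy>, and that the library's create/destroy
+// calls are wrapped in shims with these signatures, as the cuDNN and cuBLAS
+// pools do):
+//
+// > void createMyHandle(myHandle_t* handle);  // checks the status code
+// > void destroyMyHandle(myHandle_t handle);
+// >
+// > using MyPool =
+// >     DeviceThreadHandlePool<myHandle_t, createMyHandle, destroyMyHandle>;
+// > static auto pool = std::make_shared<MyPool>();
+// > thread_local std::unique_ptr<MyPool::PoolWindow> window(
+// >     pool->newPoolWindow());
+// > myHandle_t handle = window->reserve(device_index);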
+ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace at::cuda { namespace { + +template +struct DeviceThreadHandlePool : public std::enable_shared_from_this> { + + struct Handle { + Handle_t handle; + Handle(bool create = false) : handle(nullptr) + { + if(create) Create(&handle); + } + // std::vector.emplace() and push_back() may route through temporaries and call + // copy/move constructors along the way. If this is the case, we don't want + // the destructors of temporaries to call cudnnDestroy on the handle. + // We can achieve safety (for the narrow case of stashing within std::vectors) + // by making Handle moveable but not copyable, and transferring handle ownership + // to the latest constructed object. This is not a substitute for full-blown + // reference counting, but reference counting may be overkill here. + // Another alternative is to wrap the saved Handles in unique_ptrs, i.e., + // unordered_map>> created_handles; + Handle(const Handle& rhs) = delete; + // Following https://stackoverflow.com/questions/3279543/what-is-the-copy-and-swap-idiom + Handle(Handle&& rhs) : Handle() { std::swap(handle, rhs.handle); } + // operator= takes argument by value + Handle& operator=(Handle rhs) { std::swap(handle, rhs.handle); return *this; } + ~Handle() { + if(handle) Destroy(handle); + } + }; + + std::mutex mutex; + + // Handles are lazily created as different threads request them, + // but are never destroyed until the end of the process. + // The maximum number of handles this process will create for each device is equal + // to the high-water mark of the number of concurrently active threads that request + // handles for that device. + // When threads terminate, they release their handles back into the pool for reuse. + // Otherwise, new handles would be created every time new threads were spawned, + // resulting in poor performance for Python modules that repeatedly or frequently + // spawned new sets of threads (like DataParallel, which creates a new set of threads + // for each forward pass). + // + // To prevent potential deadlocks, we explicitly choose not to cap the number + // of handles that are created per device. + // Example of danger: If we cap the max handles at 4, and 5 threads are sharing a device, + // only 4 can make forward progress at any time. The other 4 will not release their + // handles until they exit, so the fifth cannot make progress until then. This is + // not a problem...UNLESS all 5 threads attempt some sort of synchronization at an + // intermediate point (ie, before any of them have exited). We have no way to anticipate + // or enforce that user threads will not attempt such intermediate synchronization. + // The only way to ensure safety is to avoid imposing a cap on the number of handles. + std::unordered_map> created_handles; + std::unordered_map> available_handles; + + // PoolWindow lazily creates and caches the handles that a particular thread is using, + // so in the common case handle access doesn't incur either handle creation or a mutex lock. + class PoolWindow + { + public: + PoolWindow(std::shared_ptr parent): weak_parent(std::move(parent)) {} + ~PoolWindow(){ release(); } + + Handle_t reserve(int device) + { + // If this thread already has a handle for this device, return it + if(my_handles.find(device) != my_handles.end()) + return my_handles[device]; + + // otherwise, either grab a handle from the pool if one is available, + // or if not, create a new one. 
+ auto parent = weak_parent.lock(); + TORCH_CHECK(parent, "Cannot create handle during program termination"); + std::lock_guard guard(parent->mutex); + + if(parent->available_handles[device].size() > 0) + { + my_handles[device] = parent->available_handles[device].back(); + parent->available_handles[device].pop_back(); + } + else + { + // In local testing, I do observe that emplace_back sometimes routes through temporaries + // that incur move-constructor and destructor calls. See comments in Handle above. + parent->created_handles[device].emplace_back(true /*create*/); + my_handles[device] = parent->created_handles[device].back().handle; + } + + return my_handles[device]; + } + + private: + // Stores the per-device handles currently owned by this thread + std::unordered_map my_handles; + + std::weak_ptr weak_parent; + + // Called by the destructor. Releases this thread's handles back into the pool. + void release() { + if(my_handles.size() > 0) { + auto parent = weak_parent.lock(); + if (!parent) { + // If this thread exits after atexit handlers have completed, the + // cuda context itself may be invalid, so we must leak the handles. + return; + } + + std::lock_guard guard(parent->mutex); + for(auto d_h : my_handles) + parent->available_handles[d_h.first].push_back(d_h.second); + } + } + }; + + // Warning: + // If you want to change this function, be aware that this function will be called + // by multiple threads and there is no mutex guarding the call of this function, so + // make sure your implementation is thread-safe. + PoolWindow *newPoolWindow() { + // The returned pointer will be owned by a thread local variable + // so that different threads does not share the same PoolWindow. + return new PoolWindow(this->shared_from_this()); + } +}; + +}} // namespace at::cuda::detail:: diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUContext.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUContext.h new file mode 100644 index 0000000000000000000000000000000000000000..fb8fbe9c0aa4221a3384d6eb7c457d8dad54d0f0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUContext.h @@ -0,0 +1,20 @@ +#pragma once + +#include +#include +#include + +namespace at::xpu { + +// XPU is available if we compiled with XPU. 
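+// e.g. (sketch):
+// > if (at::xpu::is_available()) {
+// >   DeviceProp* prop = at::xpu::getCurrentDeviceProperties();
+// > }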
+inline bool is_available() {
+  return c10::xpu::device_count() > 0;
+}
+
+TORCH_XPU_API DeviceProp* getCurrentDeviceProperties();
+
+TORCH_XPU_API DeviceProp* getDeviceProperties(DeviceIndex device);
+
+TORCH_XPU_API int32_t getGlobalIdxFromDevice(DeviceIndex device);
+
+} // namespace at::xpu
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUDevice.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUDevice.h
new file mode 100644
index 0000000000000000000000000000000000000000..d4ab7187513c15ad0bbc8cff610ec3831f3a51fe
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUDevice.h
@@ -0,0 +1,13 @@
+#pragma once
+
+#include <c10/core/Device.h>
+#include <c10/xpu/XPUFunctions.h>
+
+namespace at::xpu {
+
+inline Device getDeviceFromPtr(void* ptr) {
+  auto device = c10::xpu::get_device_idx_from_pointer(ptr);
+  return {c10::DeviceType::XPU, device};
+}
+
+} // namespace at::xpu
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUEvent.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUEvent.h
new file mode 100644
index 0000000000000000000000000000000000000000..f82c676daf88f5802d40a5536f236f91377a9bc6
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUEvent.h
@@ -0,0 +1,132 @@
+#pragma once
+#include <ATen/xpu/XPUContext.h>
+
+#include <optional>
+
+namespace at::xpu {
+
+/*
+ * XPUEvent is a movable, non-copyable wrapper around a SYCL event. XPUEvents
+ * are constructed lazily when first recorded. Each event has a device, which
+ * is acquired from the first recording stream; later streams that record the
+ * event must match that device.
+ *
+ * Currently, XPUEvent does NOT support exporting an event to another process
+ * via inter-process communication (IPC), so event handles cannot be shared
+ * between processes. This could impact applications that rely on
+ * cross-process synchronization and communication.
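+ *
+ * Typical stream-ordering sketch (mirrors the CUDAEvent pattern):
+ *
+ * > XPUEvent ev;
+ * > ev.record(stream_a);  // capture work submitted to stream_a so far
+ * > ev.block(stream_b);   // stream_b waits for that work to complete
+ * > ev.synchronize();     // block the host until the event completes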
+ */ +struct TORCH_XPU_API XPUEvent { + // Constructors + XPUEvent(bool enable_timing = false) noexcept + : enable_timing_{enable_timing} {} + + ~XPUEvent() = default; + + XPUEvent(const XPUEvent&) = delete; + XPUEvent& operator=(const XPUEvent&) = delete; + + XPUEvent(XPUEvent&& other) = default; + XPUEvent& operator=(XPUEvent&& other) = default; + + operator sycl::event&() const { + return event(); + } + + std::optional device() const { + if (isCreated()) { + return at::Device(at::kXPU, device_index_); + } else { + return std::nullopt; + } + } + + inline bool isCreated() const { + return (event_.get() != nullptr); + } + + DeviceIndex device_index() const { + return device_index_; + } + + sycl::event& event() const { + return *event_; + } + + bool query() const { + using namespace sycl::info; + if (!isCreated()) { + return true; + } + + return event().get_info() == + event_command_status::complete; + } + + void record() { + record(getCurrentXPUStream()); + } + + void recordOnce(const XPUStream& stream) { + if (!isCreated()) { + record(stream); + } + } + + void record(const XPUStream& stream) { + if (!isCreated()) { + device_index_ = stream.device_index(); + } else { + TORCH_CHECK( + device_index_ == stream.device_index(), + "Event device ", + device_index_, + " does not match recording stream's device ", + stream.device_index(), + "."); + event_.reset(); + } + event_ = std::make_unique( + stream.queue().ext_oneapi_submit_barrier()); + } + + void block(const XPUStream& stream) { + if (isCreated()) { + std::vector event_list{event()}; + // Make this stream wait until event_ is completed. + stream.queue().ext_oneapi_submit_barrier(event_list); + } + } + + float elapsed_time(const XPUEvent& other) const { + TORCH_CHECK( + isCreated() && other.isCreated(), + "Both events must be recorded before calculating elapsed time."); + TORCH_CHECK( + query() && other.query(), + "Both events must be completed before calculating elapsed time."); + TORCH_CHECK( + enable_timing_ && other.enable_timing_, + "Both events must be created with argument 'enable_timing=True'."); + // TODO: provides the ability to time the execution of commands in a SYCL + // queue without enabling profiling on the entire queue + TORCH_CHECK_NOT_IMPLEMENTED( + false, "elapsed_time is not supported by XPUEvent."); + } + + void synchronize() const { + if (isCreated()) { + event().wait_and_throw(); + } + } + + private: + bool enable_timing_ = false; + DeviceIndex device_index_ = -1; + // Only need to track the last event, as events in an in-order queue are + // executed sequentially. 
+ std::unique_ptr event_; +}; + +} // namespace at::xpu diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUGeneratorImpl.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUGeneratorImpl.h new file mode 100644 index 0000000000000000000000000000000000000000..ce77d2e444e6bbb2bcabf416d616e30bfad9d381 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/XPUGeneratorImpl.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +namespace at { + +struct TORCH_API XPUGeneratorImpl : public GeneratorImpl { + // Constructors + XPUGeneratorImpl(DeviceIndex device_index = -1); + ~XPUGeneratorImpl() override = default; + + // XPUGeneratorImpl methods + std::shared_ptr clone() const; + void set_current_seed(uint64_t seed) override; + void set_offset(uint64_t offset) override; + uint64_t get_offset() const override; + uint64_t current_seed() const override; + uint64_t seed() override; + void set_state(const c10::TensorImpl& new_state) override; + c10::intrusive_ptr get_state() const override; + void set_philox_offset_per_thread(uint64_t offset); + uint64_t philox_offset_per_thread() const; + std::pair philox_engine_inputs(uint64_t increment); + static c10::DeviceType device_type(); + + private: + XPUGeneratorImpl* clone_impl() const override; + uint64_t seed_ = default_rng_seed_val; + uint64_t philox_offset_per_thread_ = 0; +}; + +namespace xpu::detail { + +TORCH_XPU_API const Generator& getDefaultXPUGenerator(DeviceIndex device = -1); + +TORCH_XPU_API Generator createXPUGenerator(DeviceIndex device = -1); + +} // namespace xpu::detail +} // namespace at diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/detail/XPUHooks.h b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/detail/XPUHooks.h new file mode 100644 index 0000000000000000000000000000000000000000..72c2b4c4f780d2ebe0de2cbf17f3cb22c1d8dd68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/ATen/xpu/detail/XPUHooks.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +namespace at::xpu::detail { + +// The real implementation of XPUHooksInterface +struct XPUHooks : public at::XPUHooksInterface { + XPUHooks(at::XPUHooksArgs) {} + void initXPU() const override; + bool hasXPU() const override; + std::string showConfig() const override; + int32_t getGlobalIdxFromDevice(const at::Device& device) const override; + Generator getXPUGenerator(DeviceIndex device_index = -1) const override; + const Generator& getDefaultXPUGenerator( + DeviceIndex device_index = -1) const override; + Device getDeviceFromPtr(void* data) const override; + c10::DeviceIndex getNumGPUs() const override; + DeviceIndex current_device() const override; + void deviceSynchronize(DeviceIndex device_index) const override; +}; + +} // namespace at::xpu::detail diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/Export.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/Export.h new file mode 100644 index 0000000000000000000000000000000000000000..cb68060ed8129d408f1d4fdddd4bb1cdd9cd5053 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/Export.h @@ -0,0 +1,160 @@ +#ifndef C10_MACROS_EXPORT_H_ +#define C10_MACROS_EXPORT_H_ + +/* Header file to define the common scaffolding for exported symbols. 
+ *
+ * Export is by itself a quite tricky situation to deal with, and if you are
+ * hitting this file, make sure you start with the background here:
+ * - Linux: https://gcc.gnu.org/wiki/Visibility
+ * - Windows:
+ *   https://docs.microsoft.com/en-us/cpp/cpp/dllexport-dllimport?view=vs-2017
+ *
+ * Do NOT include this file directly. Instead, use c10/macros/Macros.h
+ */
+
+// You do not need to edit this part of the file unless you are changing the
+// core pytorch export abstractions.
+//
+// This part defines the C10 core export and import macros. This is controlled
+// by whether we are building shared libraries or not, which is determined
+// during build time and codified in c10/macros/cmake_macros.h. When the
+// library is built as a shared lib, EXPORT and IMPORT will contain visibility
+// attributes. If it is being built as a static lib, then EXPORT and IMPORT
+// basically have no effect.
+
+// As a rule of thumb, you should almost NEVER mix static and shared builds for
+// libraries that depend on c10. AKA, if c10 is built as a static library, we
+// recommend everything dependent on c10 to be built statically. If c10 is
+// built as a shared library, everything dependent on it should be built as
+// shared. In the PyTorch project, all native libraries shall use the macro
+// C10_BUILD_SHARED_LIB to check whether pytorch is building shared or static
+// libraries.
+
+// For build systems that do not directly depend on CMake and directly build
+// from the source directory (such as Buck), one may not have a cmake_macros.h
+// file at all. In this case, the build system is responsible for providing
+// correct macro definitions corresponding to the cmake_macros.h.in file.
+//
+// In such scenarios, one should define the macro
+//     C10_USING_CUSTOM_GENERATED_MACROS
+// to inform this header that it does not need to include the cmake_macros.h
+// file.
+
+#ifndef C10_USING_CUSTOM_GENERATED_MACROS
+#include <c10/macros/cmake_macros.h>
+#endif // C10_USING_CUSTOM_GENERATED_MACROS
+
+#ifdef _WIN32
+#define C10_HIDDEN
+#if defined(C10_BUILD_SHARED_LIBS)
+#define C10_EXPORT __declspec(dllexport)
+#define C10_IMPORT __declspec(dllimport)
+#else
+#define C10_EXPORT
+#define C10_IMPORT
+#endif
+#else // _WIN32
+#if defined(__GNUC__)
+#define C10_EXPORT __attribute__((__visibility__("default")))
+#define C10_HIDDEN __attribute__((__visibility__("hidden")))
+#else // defined(__GNUC__)
+#define C10_EXPORT
+#define C10_HIDDEN
+#endif // defined(__GNUC__)
+#define C10_IMPORT C10_EXPORT
+#endif // _WIN32
+
+#ifdef NO_EXPORT
+#undef C10_EXPORT
+#define C10_EXPORT
+#endif
+
+// Definition of an adaptive XX_API macro, that depends on whether you are
+// building the library itself or not, routes to XX_EXPORT and XX_IMPORT.
+// Basically, you will need to do this for each shared library that you are
+// building, and the instruction is as follows: assuming that you are building
+// a library called libawesome.so. You should:
+// (1) for your cmake target (usually done by "add_library(awesome, ...)"),
+//     define a macro called AWESOME_BUILD_MAIN_LIB using
+//     target_compile_options.
+// (2) define the AWESOME_API macro similar to the one below.
+// And in the source file of your awesome library, use AWESOME_API to
+// annotate public symbols.
+
+// Here, for the C10 library, we will define the macro C10_API for both import
+// and export.
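+// As an illustration (not part of the original header), step (2) above for
+// the hypothetical libawesome.so would mirror the C10_API definition that
+// follows:
+//
+//   #ifdef AWESOME_BUILD_MAIN_LIB
+//   #define AWESOME_API C10_EXPORT
+//   #else
+//   #define AWESOME_API C10_IMPORT
+//   #endif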
+
+// This one is being used by libc10.so
+#ifdef C10_BUILD_MAIN_LIB
+#define C10_API C10_EXPORT
+#else
+#define C10_API C10_IMPORT
+#endif
+
+// This one is being used by libtorch.so
+#ifdef CAFFE2_BUILD_MAIN_LIB
+#define TORCH_API C10_EXPORT
+#else
+#define TORCH_API C10_IMPORT
+#endif
+
+// You may be wondering: Whose brilliant idea was it to split torch_cuda into
+// two pieces with confusing names?
+// Once upon a time, there _was_ only TORCH_CUDA_API. All was happy until we
+// tried to compile PyTorch for CUDA 11.1, which ran into relocation marker
+// issues when linking big binaries.
+// (https://github.com/pytorch/pytorch/issues/39968) We had two choices:
+// (1) Stop supporting so many GPU architectures
+// (2) Do something else
+// We chose #2 and decided to split the behemoth that was torch_cuda into two
+// smaller libraries, one with most of the core kernel functions
+// (torch_cuda_cu) and the other that had... well... everything else
+// (torch_cuda_cpp). The idea was this: instead of linking our static
+// libraries (like the hefty libcudnn_static.a) with another huge library,
+// torch_cuda, and running into pesky relocation marker issues, we could link
+// our static libraries to a smaller part of torch_cuda (torch_cuda_cpp) and
+// avoid the issues.
+
+// libtorch_cuda_cu.so
+#ifdef TORCH_CUDA_CU_BUILD_MAIN_LIB
+#define TORCH_CUDA_CU_API C10_EXPORT
+#elif defined(BUILD_SPLIT_CUDA)
+#define TORCH_CUDA_CU_API C10_IMPORT
+#endif
+
+// libtorch_cuda_cpp.so
+#ifdef TORCH_CUDA_CPP_BUILD_MAIN_LIB
+#define TORCH_CUDA_CPP_API C10_EXPORT
+#elif defined(BUILD_SPLIT_CUDA)
+#define TORCH_CUDA_CPP_API C10_IMPORT
+#endif
+
+// libtorch_cuda.so (where torch_cuda_cu and torch_cuda_cpp are a part of the
+// same api)
+#ifdef TORCH_CUDA_BUILD_MAIN_LIB
+#define TORCH_CUDA_CPP_API C10_EXPORT
+#define TORCH_CUDA_CU_API C10_EXPORT
+#elif !defined(BUILD_SPLIT_CUDA)
+#define TORCH_CUDA_CPP_API C10_IMPORT
+#define TORCH_CUDA_CU_API C10_IMPORT
+#endif
+
+#if defined(TORCH_HIP_BUILD_MAIN_LIB)
+#define TORCH_HIP_API C10_EXPORT
+#else
+#define TORCH_HIP_API C10_IMPORT
+#endif
+
+#if defined(TORCH_XPU_BUILD_MAIN_LIB)
+#define TORCH_XPU_API C10_EXPORT
+#else
+#define TORCH_XPU_API C10_IMPORT
+#endif
+
+// Enums only need to be exported on windows for non-CUDA files
+#if defined(_WIN32) && defined(__CUDACC__)
+#define C10_API_ENUM C10_API
+#else
+#define C10_API_ENUM
+#endif
+
+#endif // C10_MACROS_EXPORT_H_
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h
new file mode 100644
index 0000000000000000000000000000000000000000..b7bab536564cb8475d2dc9edb645a37547f0914a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/macros/cmake_macros.h
@@ -0,0 +1,14 @@
+#ifndef C10_MACROS_CMAKE_MACROS_H_
+#define C10_MACROS_CMAKE_MACROS_H_
+
+// Automatically generated header file for the C10 library.
+// Do not include this file directly. Instead, include c10/macros/Macros.h.
+
+#define C10_BUILD_SHARED_LIBS
+/* #undef C10_USE_GLOG */
+/* #undef C10_USE_GFLAGS */
+/* #undef C10_USE_NUMA */
+/* #undef C10_USE_MSVC_STATIC_RUNTIME */
+/* #undef C10_USE_ROCM_KERNEL_ASSERT */
+
+#endif // C10_MACROS_CMAKE_MACROS_H_
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/AbortHandler.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/AbortHandler.h
new file mode 100644
index 0000000000000000000000000000000000000000..a75f5c235171ca909f79ead812fe5be897185f95
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/AbortHandler.h
@@ -0,0 +1,81 @@
+#include <c10/macros/Macros.h>
+#include <c10/util/Backtrace.h>
+#include <c10/util/env.h>
+#include <cstdlib>
+#include <exception>
+#include <iostream>
+#include <mutex>
+#include <string>
+
+namespace c10 {
+class AbortHandlerHelper {
+ public:
+  static AbortHandlerHelper& getInstance() {
+#ifdef _WIN32
+    thread_local
+#endif // _WIN32
+        static AbortHandlerHelper instance;
+    return instance;
+  }
+
+  void set(std::terminate_handler handler) {
+    std::lock_guard<std::mutex> lk(mutex);
+    if (!inited) {
+      prev = std::set_terminate(handler);
+      curr = std::get_terminate();
+      inited = true;
+    }
+  }
+
+  std::terminate_handler getPrev() const {
+    return prev;
+  }
+
+ private:
+  std::terminate_handler prev = nullptr;
+  std::terminate_handler curr = nullptr;
+  bool inited = false;
+  std::mutex mutex;
+  AbortHandlerHelper() = default;
+  ~AbortHandlerHelper() {
+    // Only restore the handler if we are the current one
+    if (inited && curr == std::get_terminate()) {
+      std::set_terminate(prev);
+    }
+  }
+
+ public:
+  AbortHandlerHelper(AbortHandlerHelper const&) = delete;
+  void operator=(AbortHandlerHelper const&) = delete;
+};
+
+namespace detail {
+C10_ALWAYS_INLINE void terminate_handler() {
+  std::cout << "Unhandled exception caught in c10/util/AbortHandler.h" << '\n';
+  auto backtrace = get_backtrace();
+  std::cout << backtrace << '\n' << std::flush;
+  auto prev_handler = AbortHandlerHelper::getInstance().getPrev();
+  if (prev_handler) {
+    prev_handler();
+  } else {
+    std::abort();
+  }
+}
+} // namespace detail
+
+C10_ALWAYS_INLINE void set_terminate_handler() {
+  bool use_custom_terminate = false;
+  // On Windows it is enabled by default based on
+  // https://github.com/pytorch/pytorch/pull/50320#issuecomment-763147062
+#ifdef _WIN32
+  use_custom_terminate = true;
+#endif // _WIN32
+  auto result = c10::utils::check_env("TORCH_CUSTOM_TERMINATE");
+  if (result != std::nullopt) {
+    use_custom_terminate = result.value();
+  }
+  if (use_custom_terminate) {
+    AbortHandlerHelper::getInstance().set(detail::terminate_handler);
+  }
+}
+} // namespace c10
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h
new file mode 100644
index 0000000000000000000000000000000000000000..4b3b5b6bb9aba103c8f92fd74fc2531e5eb0fc55
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ApproximateClock.h
@@ -0,0 +1,115 @@
+// Copyright 2023-present Facebook. All Rights Reserved.
+
+#pragma once
+
+#include <array>
+#include <chrono>
+#include <cstddef>
+#include <cstdint>
+#include <ctime>
+#include <functional>
+#include <type_traits>
+#include <c10/macros/Export.h>
+
+#if defined(C10_IOS) && defined(C10_MOBILE)
+#include <sys/time.h> // for gettimeofday()
+#endif
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__amd64__)
+#define C10_RDTSC
+#if defined(_MSC_VER)
+#include <intrin.h>
+#elif defined(__CUDACC__) || defined(__HIPCC__)
+#undef C10_RDTSC
+#elif defined(__clang__)
+// `__rdtsc` is available by default.
+// NB: This has to be first, because Clang will also define `__GNUC__`
+#elif defined(__GNUC__)
+#include <x86intrin.h>
+#else
+#undef C10_RDTSC
+#endif
+#endif
+
+namespace c10 {
+
+using time_t = int64_t;
+using steady_clock_t = std::conditional_t<
+    std::chrono::high_resolution_clock::is_steady,
+    std::chrono::high_resolution_clock,
+    std::chrono::steady_clock>;
+
+inline time_t getTimeSinceEpoch() {
+  auto now = std::chrono::system_clock::now().time_since_epoch();
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(now).count();
+}
+
+inline time_t getTime(bool allow_monotonic = false) {
+#if defined(C10_IOS) && defined(C10_MOBILE)
+  // clock_gettime is only available on iOS 10.0 or newer. Unlike OS X, iOS
+  // can't rely on CLOCK_REALTIME, as it is defined no matter if clock_gettime
+  // is implemented or not
+  struct timeval now;
+  gettimeofday(&now, NULL);
+  return static_cast<time_t>(now.tv_sec) * 1000000000 +
+      static_cast<time_t>(now.tv_usec) * 1000;
+#elif defined(_WIN32) || defined(__MACH__)
+  return std::chrono::duration_cast<std::chrono::nanoseconds>(
+             steady_clock_t::now().time_since_epoch())
+      .count();
+#else
+  // clock_gettime is *much* faster than the std::chrono implementation on
+  // Linux
+  struct timespec t{};
+  auto mode = CLOCK_REALTIME;
+  if (allow_monotonic) {
+    mode = CLOCK_MONOTONIC;
+  }
+  clock_gettime(mode, &t);
+  return static_cast<time_t>(t.tv_sec) * 1000000000 +
+      static_cast<time_t>(t.tv_nsec);
+#endif
+}
+
+// We often do not need to capture true wall times. If a fast mechanism such
+// as TSC is available we can use that instead and convert back to epoch time
+// during post processing. This greatly reduces the clock's contribution to
+// profiling.
+//   http://btorpey.github.io/blog/2014/02/18/clock-sources-in-linux/
+//   https://quick-bench.com/q/r8opkkGZSJMu9wM_XTbDouq-0Io
+// TODO: We should use
+// `https://github.com/google/benchmark/blob/main/src/cycleclock.h`
+inline auto getApproximateTime() {
+#if defined(C10_RDTSC)
+  return static_cast<uint64_t>(__rdtsc());
+#else
+  return getTime();
+#endif
+}
+
+using approx_time_t = decltype(getApproximateTime());
+static_assert(
+    std::is_same_v<approx_time_t, int64_t> ||
+        std::is_same_v<approx_time_t, uint64_t>,
+    "Expected either int64_t (`getTime`) or uint64_t (some TSC reads).");
+
+// Convert `getCount` results to Nanoseconds since unix epoch.
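+//
+// A minimal usage sketch (inferred from the interface below, not part of the
+// original header): record approximate timestamps on the hot path, then
+// convert them to epoch nanoseconds during post processing.
+//
+//   c10::ApproximateClockToUnixTimeConverter converter;
+//   auto t0 = c10::getApproximateTime();
+//   // ... profiled work ...
+//   auto to_unix = converter.makeConverter();
+//   c10::time_t t0_ns = to_unix(t0);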
+class C10_API ApproximateClockToUnixTimeConverter final {
+ public:
+  ApproximateClockToUnixTimeConverter();
+  std::function<time_t(approx_time_t)> makeConverter();
+
+  struct UnixAndApproximateTimePair {
+    time_t t_;
+    approx_time_t approx_t_;
+  };
+  static UnixAndApproximateTimePair measurePair();
+
+ private:
+  static constexpr size_t replicates = 1001;
+  using time_pairs = std::array<UnixAndApproximateTimePair, replicates>;
+  time_pairs measurePairs();
+
+  time_pairs start_times_;
+};
+
+} // namespace c10
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-math.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-math.h
new file mode 100644
index 0000000000000000000000000000000000000000..88a6b849d37bf931e76f3849e68e9dd17a22e000
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/BFloat16-math.h
@@ -0,0 +1,287 @@
+#pragma once
+
+#include <c10/util/BFloat16.h>
+#include <c10/util/Half.h>
+
+C10_CLANG_DIAGNOSTIC_PUSH()
+#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
+C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
+#endif
+
+namespace std {
+
+template <typename T>
+struct is_reduced_floating_point
+    : std::integral_constant<
+          bool,
+          std::is_same_v<T, c10::Half> || std::is_same_v<T, c10::BFloat16>> {};
+
+template <typename T>
+constexpr bool is_reduced_floating_point_v =
+    is_reduced_floating_point<T>::value;
+
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T acos(T a) {
+  return std::acos(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T asin(T a) {
+  return std::asin(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T atan(T a) {
+  return std::atan(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T atanh(T a) {
+  return std::atanh(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T erf(T a) {
+  return std::erf(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T erfc(T a) {
+  return std::erfc(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T exp(T a) {
+  return std::exp(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T expm1(T a) {
+  return std::expm1(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T log(T a) {
+  return std::log(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T log10(T a) {
+  return std::log10(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T log1p(T a) {
+  return std::log1p(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T log2(T a) {
+  return std::log2(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T ceil(T a) {
+  return std::ceil(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T cos(T a) {
+  return std::cos(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T floor(T a) {
+  return std::floor(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T nearbyint(T a) {
+  return std::nearbyint(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T sin(T a) {
+  return std::sin(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T tan(T a) {
+  return std::tan(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T sinh(T a) {
+  return std::sinh(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T cosh(T a) {
+  return std::cosh(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T tanh(T a) {
+  return std::tanh(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T trunc(T a) {
+  return std::trunc(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T lgamma(T a) {
+  return std::lgamma(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T sqrt(T a) {
+  return std::sqrt(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T rsqrt(T a) {
+  return 1.0 / std::sqrt(float(a));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T abs(T a) {
+  return std::abs(float(a));
+}
+#if defined(_MSC_VER) && defined(__CUDACC__)
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T pow(T a, double b) {
+  return std::pow(float(a), float(b));
+}
+#else
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T pow(T a, double b) {
+  return std::pow(float(a), b);
+}
+#endif
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T pow(T a, T b) {
+  return std::pow(float(a), float(b));
+}
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+inline T fmod(T a, T b) {
+  return std::fmod(float(a), float(b));
+}
+
+/*
+  The following function is inspired from the implementation in `musl`
+  Link to License: https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
+  ----------------------------------------------------------------------
+  Copyright © 2005-2020 Rich Felker, et al.
+
+  Permission is hereby granted, free of charge, to any person obtaining
+  a copy of this software and associated documentation files (the
+  "Software"), to deal in the Software without restriction, including
+  without limitation the rights to use, copy, modify, merge, publish,
+  distribute, sublicense, and/or sell copies of the Software, and to
+  permit persons to whom the Software is furnished to do so, subject to
+  the following conditions:
+
+  The above copyright notice and this permission notice shall be
+  included in all copies or substantial portions of the Software.
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+  ----------------------------------------------------------------------
+ */
+template <
+    typename T,
+    typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
+C10_HOST_DEVICE inline T nextafter(T from, T to) {
+  // Reference:
+  // https://git.musl-libc.org/cgit/musl/tree/src/math/nextafter.c
+  using int_repr_t = uint16_t;
+  using float_t = T;
+  constexpr uint8_t bits = 16;
+  union {
+    float_t f;
+    int_repr_t i;
+  } ufrom = {from}, uto = {to};
+
+  // get a mask to get the sign bit, i.e. the MSB
+  int_repr_t sign_mask = int_repr_t{1} << (bits - 1);
+
+  // short-circuit: if either is NaN, return NaN
+  if (from != from || to != to) {
+    return from + to;
+  }
+
+  // short-circuit: if they are exactly the same.
+  if (ufrom.i == uto.i) {
+    return from;
+  }
+
+  // mask the sign-bit to zero, i.e. positive
+  // equivalent to abs(x)
+  int_repr_t abs_from = ufrom.i & ~sign_mask;
+  int_repr_t abs_to = uto.i & ~sign_mask;
+  if (abs_from == 0) {
+    // if both are zero but with different signs,
+    // preserve the sign of `to`.
+    if (abs_to == 0) {
+      return to;
+    }
+    // smallest subnormal with sign of `to`.
+    ufrom.i = (uto.i & sign_mask) | int_repr_t{1};
+    return ufrom.f;
+  }
+
+  // if abs(from) > abs(to) or sign(from) != sign(to)
+  if (abs_from > abs_to || ((ufrom.i ^ uto.i) & sign_mask)) {
+    ufrom.i--;
+  } else {
+    ufrom.i++;
+  }
+
+  return ufrom.f;
+}
+
+} // namespace std
+
+C10_CLANG_DIAGNOSTIC_POP()
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Backtrace.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Backtrace.h
new file mode 100644
index 0000000000000000000000000000000000000000..75691286d9019a778b0b6752d3fbde492a2ebdfb
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Backtrace.h
@@ -0,0 +1,17 @@
+#ifndef C10_UTIL_BACKTRACE_H_
+#define C10_UTIL_BACKTRACE_H_
+
+#include <cstddef>
+#include <string>
+#include <typeinfo>
+
+#include <c10/macros/Macros.h>
+
+namespace c10 {
+C10_API std::string get_backtrace(
+    size_t frames_to_skip = 0,
+    size_t maximum_number_of_frames = 64,
+    bool skip_python_frames = true);
+} // namespace c10
+
+#endif // C10_UTIL_BACKTRACE_H_
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h
new file mode 100644
index 0000000000000000000000000000000000000000..fede88f682b76cd7b214977a5b22478e7fdaf2a8
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Bitset.h
@@ -0,0 +1,116 @@
+#pragma once
+
+#include <cstddef>
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif
+
+namespace c10::utils {
+
+/**
+ * This is a simple bitset class with sizeof(long long int) bits.
+ * You can set bits, unset bits, query bits by index,
+ * and query for the first set bit.
+ * Before using this class, please also take a look at std::bitset,
+ * which has more functionality and is more generic. It is probably
+ * a better fit for your use case. The sole reason for c10::utils::bitset
+ * to exist is that std::bitset misses a find_first_set() method.
+ */
+struct bitset final {
+ private:
+#if defined(_MSC_VER)
+  // MSVC's _BitScanForward64 expects int64_t
+  using bitset_type = int64_t;
+#else
+  // POSIX ffsll expects long long int
+  using bitset_type = long long int;
+#endif
+ public:
+  static constexpr size_t NUM_BITS() {
+    return 8 * sizeof(bitset_type);
+  }
+
+  constexpr bitset() noexcept = default;
+  constexpr bitset(const bitset&) noexcept = default;
+  constexpr bitset(bitset&&) noexcept = default;
+  // There is an issue with gcc 5.3.0 when defining a defaulted function as
+  // constexpr; see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=68754.
+  bitset& operator=(const bitset&) noexcept = default;
+  bitset& operator=(bitset&&) noexcept = default;
+
+  constexpr void set(size_t index) noexcept {
+    bitset_ |= (static_cast<bitset_type>(1) << index);
+  }
+
+  constexpr void unset(size_t index) noexcept {
+    bitset_ &= ~(static_cast<bitset_type>(1) << index);
+  }
+
+  constexpr bool get(size_t index) const noexcept {
+    return bitset_ & (static_cast<bitset_type>(1) << index);
+  }
+
+  constexpr bool is_entirely_unset() const noexcept {
+    return 0 == bitset_;
+  }
+
+  // Call the given functor with the index of each bit that is set
+  template <class Func>
+  void for_each_set_bit(Func&& func) const {
+    bitset cur = *this;
+    size_t index = cur.find_first_set();
+    while (0 != index) {
+      // -1 because find_first_set() is one-indexed.
+      index -= 1;
+      func(index);
+      cur.unset(index);
+      index = cur.find_first_set();
+    }
+  }
+
+ private:
+  // Return the index of the first set bit. The returned index is one-indexed
+  // (i.e. if the very first bit is set, this function returns '1'), and a
+  // return of '0' means that there was no bit set.
+  size_t find_first_set() const {
+#if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
+    unsigned long result;
+    bool has_bits_set = (0 != _BitScanForward64(&result, bitset_));
+    if (!has_bits_set) {
+      return 0;
+    }
+    return result + 1;
+#elif defined(_MSC_VER) && defined(_M_IX86)
+    unsigned long result;
+    if (static_cast<uint32_t>(bitset_) != 0) {
+      bool has_bits_set =
+          (0 != _BitScanForward(&result, static_cast<uint32_t>(bitset_)));
+      if (!has_bits_set) {
+        return 0;
+      }
+      return result + 1;
+    } else {
+      bool has_bits_set =
+          (0 != _BitScanForward(&result, static_cast<uint32_t>(bitset_ >> 32)));
+      if (!has_bits_set) {
+        // The high half is also empty, so no bit is set at all.
+        return 0;
+      }
+      return result + 33;
+    }
+#else
+    return __builtin_ffsll(bitset_);
+#endif
+  }
+
+  friend bool operator==(bitset lhs, bitset rhs) noexcept {
+    return lhs.bitset_ == rhs.bitset_;
+  }
+
+  bitset_type bitset_{0};
+};
+
+inline bool operator!=(bitset lhs, bitset rhs) noexcept {
+  return !(lhs == rhs);
+}
+
+} // namespace c10::utils
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/C++17.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/C++17.h
new file mode 100644
index 0000000000000000000000000000000000000000..4c2c3a6634658d93cc13e46d40bca67747224ce0
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/C++17.h
@@ -0,0 +1,166 @@
+#pragma once
+#ifndef C10_UTIL_CPP17_H_
+#define C10_UTIL_CPP17_H_
+
+#include <c10/macros/Macros.h>
+#include <functional>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#if !defined(__clang__) && !defined(_MSC_VER) && defined(__GNUC__) && \
+    __GNUC__ < 9
+#error \
+    "You're trying to build PyTorch with a too old version of GCC. We need GCC 9 or later."
+#endif
+
+#if defined(__clang__) && __clang_major__ < 9
+#error \
+    "You're trying to build PyTorch with a too old version of Clang. We need Clang 9 or later."
+#endif
+
+#if (defined(_MSC_VER) && (!defined(_MSVC_LANG) || _MSVC_LANG < 201703L)) || \
+    (!defined(_MSC_VER) && __cplusplus < 201703L)
+#error You need C++17 to compile PyTorch
+#endif
+
+#if defined(_WIN32) && (defined(min) || defined(max))
+#error Macro clash with min and max -- define NOMINMAX when compiling your program on Windows
+#endif
+
+/*
+ * This header adds some polyfills with C++17 functionality
+ */
+
+namespace c10 {
+
+// In C++17, std::result_of has been superseded by std::invoke_result. Since
+// C++20, std::result_of is removed.
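+//
+// Illustrative example (not part of the original header): for a callable
+// `auto f = [](int x) { return x * 0.5; };`,
+// `c10::invoke_result_t<decltype(f), int>` names `double` on either branch
+// below.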
+template <class F, class... Args>
+#if defined(__cpp_lib_is_invocable) && __cpp_lib_is_invocable >= 201703L
+using invoke_result = typename std::invoke_result<F, Args...>;
+#else
+using invoke_result = typename std::result_of<F && (Args && ...)>;
+#endif
+
+template <class F, class... Args>
+using invoke_result_t = typename invoke_result<F, Args...>::type;
+
+// std::is_pod is deprecated in C++20, std::is_standard_layout and
+// std::is_trivial were introduced in C++11, and std::conjunction was
+// introduced in C++17.
+template <typename T>
+using is_pod = std::conjunction<std::is_standard_layout<T>, std::is_trivial<T>>;
+
+template <typename T>
+constexpr bool is_pod_v = is_pod<T>::value;
+
+namespace guts {
+
+template <typename Base, typename Child, typename... Args>
+std::enable_if_t<
+    !std::is_array_v<Base> && !std::is_array_v<Child> &&
+        std::is_base_of_v<Base, Child>,
+    std::unique_ptr<Base>>
+make_unique_base(Args&&... args) {
+  return std::unique_ptr<Base>(new Child(std::forward<Args>(args)...));
+}
+
+template <class... B>
+using conjunction = std::conjunction<B...>;
+template <class... B>
+using disjunction = std::disjunction<B...>;
+template <bool B>
+using bool_constant = std::bool_constant<B>;
+template <class B>
+using negation = std::negation<B>;
+
+template <class... T>
+using void_t = std::void_t<T...>;
+
+#if defined(__cpp_lib_apply) && !defined(__CUDA_ARCH__) && !defined(__HIP__)
+
+template <class F, class Tuple>
+C10_HOST_DEVICE inline constexpr decltype(auto) apply(F&& f, Tuple&& t) {
+  return std::apply(std::forward<F>(f), std::forward<Tuple>(t));
+}
+
+#else
+
+// Implementation from http://en.cppreference.com/w/cpp/utility/apply (but
+// modified)
+// TODO This is an incomplete implementation of std::apply, not working for
+// member functions.
+namespace detail {
+template <class F, class Tuple, std::size_t... INDEX>
+#if defined(_MSC_VER)
+// MSVC has a problem with the decltype() return type, but it also doesn't need
+// it
+C10_HOST_DEVICE constexpr auto apply_impl(
+    F&& f,
+    Tuple&& t,
+    std::index_sequence<INDEX...>)
+#else
+// GCC/Clang need the decltype() return type
+C10_HOST_DEVICE constexpr decltype(auto) apply_impl(
+    F&& f,
+    Tuple&& t,
+    std::index_sequence<INDEX...>)
+#endif
+{
+  return std::forward<F>(f)(std::get<INDEX>(std::forward<Tuple>(t))...);
+}
+} // namespace detail
+
+template <class F, class Tuple>
+C10_HOST_DEVICE constexpr decltype(auto) apply(F&& f, Tuple&& t) {
+  return detail::apply_impl(
+      std::forward<F>(f),
+      std::forward<Tuple>(t),
+      std::make_index_sequence<
+          std::tuple_size<std::remove_reference_t<Tuple>>::value>{});
+}
+
+#endif
+
+template <typename Functor, typename... Args>
+std::enable_if_t<
+    std::is_member_pointer_v<std::decay_t<Functor>>,
+    typename c10::invoke_result_t<Functor, Args...>>
+invoke(Functor&& f, Args&&... args) {
+  return std::mem_fn(std::forward<Functor>(f))(std::forward<Args>(args)...);
+}
+
+template <typename Functor, typename... Args>
+std::enable_if_t<
+    !std::is_member_pointer_v<std::decay_t<Functor>>,
+    typename c10::invoke_result_t<Functor, Args...>>
+invoke(Functor&& f, Args&&... args) {
+  return std::forward<Functor>(f)(std::forward<Args>(args)...);
+}
+
+namespace detail {
+struct _identity final {
+  template <class T>
+  using type_identity = T;
+
+  template <class T>
+  decltype(auto) operator()(T&& arg) {
+    return std::forward<T>(arg);
+  }
+};
+
+template <class Func, class Enable = void>
+struct function_takes_identity_argument : std::false_type {};
+
+template <class Func>
+struct function_takes_identity_argument<
+    Func,
+    std::void_t<decltype(std::declval<Func>()(_identity()))>> : std::true_type {
+};
+} // namespace detail
+
+} // namespace guts
+} // namespace c10
+
+#endif // C10_UTIL_CPP17_H_
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h
new file mode 100644
index 0000000000000000000000000000000000000000..ce72b4a35d0fd35e9f00e034040a31e8e3b8b28e
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/DeadlockDetection.h
@@ -0,0 +1,48 @@
+#pragma once
+
+#include <c10/macros/Export.h>
+#include <c10/util/Exception.h>
+
+/// This file provides some simple utilities for detecting common deadlocks in
+/// PyTorch. For now, we focus exclusively on detecting Python GIL deadlocks,
+/// as the GIL is a wide-ranging lock that is taken out in many situations.
+/// The basic strategy is: before performing an operation that may block, you
+/// can use TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() to assert that the GIL is
+/// not held. This macro is to be used in contexts where no static dependency
+/// on Python is available (we will handle indirecting a virtual call for you).
+///
+/// If the GIL is held by a torchdeploy interpreter, we always report false.
+/// If you are in a context where Python bindings are available, it's better
+/// to directly assert on PyGILState_Check (as it avoids a vcall and also
+/// works correctly with torchdeploy).
+
+#define TORCH_ASSERT_NO_GIL_WITHOUT_PYTHON_DEP() \
+  TORCH_INTERNAL_ASSERT(                         \
+      !c10::impl::check_python_gil(),            \
+      "Holding GIL before a blocking operation! Please release the GIL before blocking, or see https://github.com/pytorch/pytorch/issues/56297 for how to release the GIL for destructors of objects")
+
+namespace c10::impl {
+
+C10_API bool check_python_gil();
+
+struct C10_API PythonGILHooks {
+  virtual ~PythonGILHooks() = default;
+  // Returns true if we hold the GIL. If not linked against Python, we
+  // always return false.
+  virtual bool check_python_gil() const = 0;
+};
+
+C10_API void SetPythonGILHooks(PythonGILHooks* factory);
+
+// DO NOT call this registerer from a torch deploy instance! You will clobber
+// other registrations
+struct C10_API PythonGILHooksRegisterer {
+  explicit PythonGILHooksRegisterer(PythonGILHooks* factory) {
+    SetPythonGILHooks(factory);
+  }
+  ~PythonGILHooksRegisterer() {
+    SetPythonGILHooks(nullptr);
+  }
+};
+
+} // namespace c10::impl
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Exception.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Exception.h
new file mode 100644
index 0000000000000000000000000000000000000000..02c6d2ff0fb4198f1263a8594407be8a88014f03
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Exception.h
@@ -0,0 +1,711 @@
+#ifndef C10_UTIL_EXCEPTION_H_
+#define C10_UTIL_EXCEPTION_H_
+
+#include <c10/macros/Export.h>
+#include <c10/macros/Macros.h>
+#include <c10/util/StringUtil.h>
+
+#include <cstdint>
+#include <exception>
+#include <string>
+#include <variant>
+#include <vector>
+
+#if defined(_MSC_VER) && _MSC_VER <= 1900
+#define __func__ __FUNCTION__
+#endif
+
+namespace c10 {
+
+/// The primary ATen error class.
+/// Provides a complete error message with source location information via
+/// `what()`, and a more concise message via `what_without_backtrace()`.
+/// Don't throw this directly; use TORCH_CHECK/TORCH_INTERNAL_ASSERT instead.
+///
+/// NB: c10::Error is handled specially by the default torch to suppress the
+/// backtrace, see torch/csrc/Exceptions.h
+class C10_API Error : public std::exception {
+  // The actual error message.
+  std::string msg_;
+
+  // Context for the message (in order of decreasing specificity). Context will
+  // be automatically formatted appropriately, so it is not necessary to add
+  // extra leading/trailing newlines to strings inside this vector
+  std::vector<std::string> context_;
+
+  // The C++ backtrace at the point when this exception was raised. This
+  // may be empty if there is no valid backtrace. (We don't use optional
+  // here to reduce the dependencies this file has.)
+  std::string backtrace_;
+
+  // These two are derived fields from msg_stack_ and backtrace_, but we need
+  // fields for the strings so that we can return a const char* (as the
+  // signature of std::exception requires).
+  // Currently, the invariant is that these fields are ALWAYS populated
+  // consistently with respect to msg_stack_ and backtrace_.
+  std::string what_;
+  std::string what_without_backtrace_;
+
+  // This is a little debugging trick: you can stash a relevant pointer
+  // in caller, and then when you catch the exception, you can compare
+  // against pointers you have on hand to get more information about
+  // where the exception came from. In Caffe2, this is used to figure
+  // out which operator raised an exception.
+  const void* caller_;
+
+ public:
+  // PyTorch-style Error constructor. NB: the implementation of this
+  // is actually in Logging.cpp
+  Error(SourceLocation source_location, std::string msg);
+
+  // Caffe2-style error message
+  Error(
+      const char* file,
+      const uint32_t line,
+      const char* condition,
+      const std::string& msg,
+      const std::string& backtrace,
+      const void* caller = nullptr);
+
+  // Base constructor
+  Error(std::string msg, std::string backtrace, const void* caller = nullptr);
+
+  // Add some new context to the message stack. The last added context
+  // will be formatted at the end of the context list upon printing.
+  // WARNING: This method is O(n) in the size of the stack, so don't go
+  // wild adding a ridiculous amount of context to error messages.
+  void add_context(std::string msg);
+
+  const std::string& msg() const {
+    return msg_;
+  }
+
+  const std::vector<std::string>& context() const {
+    return context_;
+  }
+
+  const std::string& backtrace() const {
+    return backtrace_;
+  }
+
+  /// Returns the complete error message, including the source location.
+  /// The returned pointer is invalidated if you call add_context() on
+  /// this object.
+  const char* what() const noexcept override {
+    return what_.c_str();
+  }
+
+  const void* caller() const noexcept {
+    return caller_;
+  }
+
+  /// Returns only the error message string, without source location.
+  /// The returned pointer is invalidated if you call add_context() on
+  /// this object.
+  virtual const char* what_without_backtrace() const noexcept {
+    return what_without_backtrace_.c_str();
+  }
+
+ private:
+  void refresh_what();
+  std::string compute_what(bool include_backtrace) const;
+};
+
+class C10_API Warning {
+ public:
+  class C10_API UserWarning {};
+  class C10_API DeprecationWarning {};
+
+  using warning_variant_t = std::variant<UserWarning, DeprecationWarning>;
+
+  Warning(
+      warning_variant_t type,
+      const SourceLocation& source_location,
+      std::string msg,
+      bool verbatim);
+
+  Warning(
+      warning_variant_t type,
+      SourceLocation source_location,
+      const char* msg,
+      bool verbatim);
+
+  Warning(
+      warning_variant_t type,
+      SourceLocation source_location,
+      ::c10::detail::CompileTimeEmptyString msg,
+      bool verbatim);
+
+  // Getters for members
+  warning_variant_t type() const;
+  const SourceLocation& source_location() const;
+  const std::string& msg() const;
+  bool verbatim() const;
+
+ private:
+  // The type of warning
+  warning_variant_t type_;
+
+  // Where the warning happened.
+  SourceLocation source_location_;
+
+  // The actual warning message.
+  std::string msg_;
+
+  // See note: [Verbatim Warnings]
+  bool verbatim_;
+};
+
+using UserWarning = Warning::UserWarning;
+using DeprecationWarning = Warning::DeprecationWarning;
+
+// Issue a warning with a given message. Dispatched to the current
+// warning handler.
+void C10_API warn(const Warning& warning);
+
+class C10_API WarningHandler {
+ public:
+  virtual ~WarningHandler() = default;
+  /// The default warning handler. Prints the message to stderr.
+  virtual void process(const Warning& warning);
+};
+
+namespace WarningUtils {
+
+// Note: [Verbatim Warnings]
+// Warnings originating in C++ code can appear out-of-place to Python users:
+// a user runs a line in Python, but the warning references a line in C++.
+// Some parts of PyTorch, like the JIT, are cognizant of this mismatch
+// and take care to map warnings back to the user's program, but most
+// of PyTorch simply throws a context-free warning. To allow warning
+// handlers to add context where appropriate, warn takes the
+// "verbatim" flag. When this is false a warning handler might append
+// the C++ warning to a Python warning message that relates the warning
+// back to the user's program. Callers who have already accounted for
+// context in their warnings should set verbatim to true so their warnings
+// appear without modification.
+
+/// Sets the global warning handler. This is not thread-safe, so it should
+/// generally be called once during initialization or while holding the GIL
+/// for programs that use Python.
+/// The user is responsible for keeping the WarningHandler alive until
+/// it is no longer needed.
+C10_API void set_warning_handler(WarningHandler* handler) noexcept(true);
+/// Gets the global warning handler.
+C10_API WarningHandler* get_warning_handler() noexcept(true);
+
+class C10_API WarningHandlerGuard {
+  WarningHandler* prev_handler_;
+
+ public:
+  WarningHandlerGuard(WarningHandler* new_handler)
+      : prev_handler_(c10::WarningUtils::get_warning_handler()) {
+    c10::WarningUtils::set_warning_handler(new_handler);
+  }
+  ~WarningHandlerGuard() {
+    c10::WarningUtils::set_warning_handler(prev_handler_);
+  }
+};
+
+/// The TORCH_WARN_ONCE macro is difficult to test for. Use
+/// setWarnAlways(true) to turn it into TORCH_WARN, which can be
+/// tested for more easily.
+C10_API void set_warnAlways(bool) noexcept(true);
+C10_API bool get_warnAlways() noexcept(true);
+
+// A RAII guard that sets warn_always (not thread-local) on
+// construction, and sets it back to the original value upon destruction.
+struct C10_API WarnAlways {
+ public:
+  explicit WarnAlways(bool setting = true);
+  ~WarnAlways();
+
+ private:
+  bool prev_setting;
+};
+
+} // namespace WarningUtils
+
+// Like Error, but we always report the C++ backtrace, instead of only
+// reporting when TORCH_SHOW_CPP_STACKTRACES is enabled
+class C10_API ErrorAlwaysShowCppStacktrace : public Error {
+  using Error::Error;
+  const char* what_without_backtrace() const noexcept override {
+    return what();
+  }
+};
+
+// Used in ATen for out-of-bound indices that can reasonably only be detected
+// lazily inside a kernel (See: advanced indexing). These turn into
+// IndexError when they cross to Python.
+class C10_API IndexError : public Error {
+  using Error::Error;
+};
+
+// Used in ATen for invalid values. These turn into
+// ValueError when they cross to Python.
+class C10_API ValueError : public Error {
+  using Error::Error;
+};
+
+// Used in ATen for invalid types. These turn into
+// TypeError when they cross to Python.
+class C10_API TypeError : public Error {
+  using Error::Error;
+};
+
+// Used in ATen for functionality that is not implemented. These turn into
+// NotImplementedError when they cross to Python.
+class C10_API NotImplementedError : public Error {
+  using Error::Error;
+};
+
+// Used in ATen for non-finite indices. These turn into
+// ExitException when they cross to Python.
+class C10_API EnforceFiniteError : public Error {
+  using Error::Error;
+};
+
+// Used in Onnxifi backend lowering. These turn into
+// ExitException when they cross to Python.
+class C10_API OnnxfiBackendSystemError : public Error {
+  using Error::Error;
+};
+
+// Used for numerical errors from the linalg module. These
+// turn into LinAlgError when they cross into Python.
+class C10_API LinAlgError : public Error {
+  using Error::Error;
+};
+
+class C10_API OutOfMemoryError : public Error {
+  using Error::Error;
+};
+
+// Base error type for all distributed errors.
+// These turn into DistError when they cross into Python.
+class C10_API DistError : public Error {
+  using Error::Error;
+};
+
+// Used for collective communication library errors from the distributed
+// module. These turn into DistBackendError when they cross into Python.
+class C10_API DistBackendError : public DistError {
+  using DistError::DistError;
+};
+
+// Used for errors originating from the store.
+// These turn into DistStoreError when they cross into Python.
+class C10_API DistStoreError : public DistError {
+  using DistError::DistError;
+};
+
+// Used for errors originating from the TCP/IP stack and not from collective
+// libraries. These turn into DistNetworkError when they cross into Python.
+class C10_API DistNetworkError : public DistError {
+  using DistError::DistError;
+};
+
+// A utility function to return an exception std::string by prepending its
+// exception type before its what() content
+C10_API std::string GetExceptionString(const std::exception& e);
+
+} // namespace c10
+
+// Private helper macro for implementing TORCH_INTERNAL_ASSERT and TORCH_CHECK
+//
+// Note: In the debug build with MSVC, __LINE__ might be of long type (a.k.a.
+// int32_t), which is different from the definition of `SourceLocation` that
+// requires unsigned int (a.k.a. uint32_t), and may cause a compile error with
+// the message: error C2397: conversion from 'long' to 'uint32_t' requires a
+// narrowing conversion. Here the static_cast is used to pass the build. If
+// this is used inside a lambda, the __func__ macro expands to operator(),
+// which isn't very useful, but is hard to fix in a macro, so we suppress the
+// warning.
+#define C10_THROW_ERROR(err_type, msg) \
+  throw ::c10::err_type(               \
+      {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
+
+#define C10_BUILD_ERROR(err_type, msg) \
+  ::c10::err_type({__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, msg)
+
+// Private helper macro for workaround MSVC misexpansion of nested macro
+// invocations involving __VA_ARGS__. See
+// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
+#define C10_EXPAND_MSVC_WORKAROUND(x) x
+
+// On nvcc, C10_UNLIKELY thwarts missing return statement analysis. In cases
+// where the unlikely expression may be a constant, use this macro to ensure
+// return statement analysis keeps working (at the cost of not getting the
+// likely/unlikely annotation on nvcc).
+// https://github.com/pytorch/pytorch/issues/21418
+//
+// Currently, this is only used in the error reporting macros below. If you
+// want to use it more generally, move me to Macros.h
+//
+// TODO: Brian Vaughan observed that we might be able to get this to work on
+// nvcc by writing some sort of C++ overload that distinguishes constexpr
+// inputs from non-constexpr. Since there isn't any evidence that losing
+// C10_UNLIKELY in nvcc is causing us perf problems, this is not yet
+// implemented, but this might be an interesting piece of C++ code for an
+// intrepid bootcamper to write.
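+//
+// Illustrative note (not part of the original header): in
+// TORCH_INTERNAL_ASSERT(false), the condition reaches C10_UNLIKELY_OR_CONST
+// as the constant `false`; on nvcc it is passed through unchanged so
+// missing-return analysis still sees a constant, while host compilers wrap
+// it in C10_UNLIKELY for the branch-prediction hint.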
+#if defined(__CUDACC__)
+#define C10_UNLIKELY_OR_CONST(e) e
+#else
+#define C10_UNLIKELY_OR_CONST(e) C10_UNLIKELY(e)
+#endif
+
+// ----------------------------------------------------------------------------
+// Error reporting macros
+// ----------------------------------------------------------------------------
+
+#ifdef STRIP_ERROR_MESSAGES
+#define TORCH_RETHROW(e, ...) throw
+#else
+#define TORCH_RETHROW(e, ...)               \
+  do {                                      \
+    e.add_context(::c10::str(__VA_ARGS__)); \
+    throw;                                  \
+  } while (false)
+#endif
+
+// A utility macro to provide assert()-like functionality; that is, enforcement
+// of internal invariants in code. It supports an arbitrary number of extra
+// arguments (evaluated only on failure), which will be printed in the assert
+// failure message using operator<< (this is useful to print some variables
+// which may be useful for debugging.)
+//
+// Usage:
+//   TORCH_INTERNAL_ASSERT(should_be_true);
+//   TORCH_INTERNAL_ASSERT(x == 0, "x = ", x);
+//
+// Assuming no bugs in PyTorch, the conditions tested by this macro should
+// always be true; e.g., it should be possible to disable all of these
+// conditions without changing observable user behavior. If you would like to
+// do error reporting for user input, please use TORCH_CHECK instead.
+//
+// NOTE: It is SAFE to use this macro in production code; on failure, this
+// simply raises an exception, it does NOT unceremoniously quit the process
+// (unlike assert()).
+//
+#ifdef STRIP_ERROR_MESSAGES
+#define TORCH_INTERNAL_ASSERT(cond, ...)                              \
+  if (C10_UNLIKELY_OR_CONST(!(cond))) {                               \
+    ::c10::detail::torchCheckFail(                                    \
+        __func__,                                                     \
+        __FILE__,                                                     \
+        static_cast<uint32_t>(__LINE__),                              \
+        #cond " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__)); \
+  }
+#else
+// It would be nice if we could build a combined string literal out of
+// the TORCH_INTERNAL_ASSERT prefix and a user-provided string literal
+// as the first argument, but there doesn't seem to be any good way to
+// do that while still supporting having a first argument that isn't a
+// string literal.
+#define TORCH_INTERNAL_ASSERT(cond, ...)                                         \
+  if (C10_UNLIKELY_OR_CONST(!(cond))) {                                          \
+    ::c10::detail::torchInternalAssertFail(                                      \
+        __func__,                                                                \
+        __FILE__,                                                                \
+        static_cast<uint32_t>(__LINE__),                                         \
+        #cond                                                                    \
+        " INTERNAL ASSERT FAILED at " C10_STRINGIZE(__FILE__) ":" C10_STRINGIZE( \
+            __LINE__) ", please report a bug to PyTorch. ",                      \
+        c10::str(__VA_ARGS__));                                                  \
+  }
+#endif
+
+// A utility macro to make it easier to test for error conditions from user
+// input. Like TORCH_INTERNAL_ASSERT, it supports an arbitrary number of extra
+// arguments (evaluated only on failure), which will be printed in the error
+// message using operator<< (e.g., you can pass any object which has
+// operator<< defined. Most objects in PyTorch have these definitions!)
+//
+// Usage:
+//   TORCH_CHECK(should_be_true); // A default error message will be provided
+//                                // in this case; but we recommend writing an
+//                                // explicit error message, as it is more
+//                                // user friendly.
+//   TORCH_CHECK(x == 0, "Expected x to be 0, but got ", x);
+//
+// On failure, this macro will raise an exception. If this exception propagates
+// to Python, it will convert into a Python RuntimeError.
+//
+// NOTE: It is SAFE to use this macro in production code; on failure, this
+// simply raises an exception, it does NOT unceremoniously quit the process
+// (unlike CHECK() from glog.)
+//
+#define TORCH_CHECK_WITH(error_t, cond, ...) \
+  TORCH_CHECK_WITH_MSG(error_t, cond, "", __VA_ARGS__)
+
+#ifdef STRIP_ERROR_MESSAGES
+#define TORCH_CHECK_MSG(cond, type, ...) \
+  (#cond #type " CHECK FAILED at " C10_STRINGIZE(__FILE__))
+#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...)                \
+  if (C10_UNLIKELY_OR_CONST(!(cond))) {                               \
+    C10_THROW_ERROR(Error, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
+  }
+#else
+
+namespace c10::detail {
+template <typename... Args>
+decltype(auto) torchCheckMsgImpl(const char* /*msg*/, const Args&... args) {
+  return ::c10::str(args...);
+}
+inline C10_API const char* torchCheckMsgImpl(const char* msg) {
+  return msg;
+}
+// If there is just 1 user-provided C-string argument, use it.
+inline C10_API const char* torchCheckMsgImpl(
+    const char* /*msg*/,
+    const char* args) {
+  return args;
+}
+} // namespace c10::detail
+
+#define TORCH_CHECK_MSG(cond, type, ...)                   \
+  (::c10::detail::torchCheckMsgImpl(                       \
+      "Expected " #cond                                    \
+      " to be true, but got false. "                       \
+      "(Could this error message be improved? If so, "     \
+      "please report an enhancement request to PyTorch.)", \
+      ##__VA_ARGS__))
+#define TORCH_CHECK_WITH_MSG(error_t, cond, type, ...)                  \
+  if (C10_UNLIKELY_OR_CONST(!(cond))) {                                 \
+    C10_THROW_ERROR(error_t, TORCH_CHECK_MSG(cond, type, __VA_ARGS__)); \
+  }
+#endif
+
+namespace c10::detail {
+
+[[noreturn]] C10_API void torchCheckFail(
+    const char* func,
+    const char* file,
+    uint32_t line,
+    const std::string& msg);
+[[noreturn]] C10_API void torchCheckFail(
+    const char* func,
+    const char* file,
+    uint32_t line,
+    const char* msg);
+
+// The c10::str() call that creates userMsg can have 1 of 3 return
+// types depending on the number and types of arguments passed to
+// TORCH_INTERNAL_ASSERT. 0 arguments will get a
+// CompileTimeEmptyString, 1 const char * will be passed straight
+// through, and anything else will get converted to std::string.
+[[noreturn]] C10_API void torchInternalAssertFail(
+    const char* func,
+    const char* file,
+    uint32_t line,
+    const char* condMsg,
+    const char* userMsg);
+[[noreturn]] inline C10_API void torchInternalAssertFail(
+    const char* func,
+    const char* file,
+    uint32_t line,
+    const char* condMsg,
+    ::c10::detail::CompileTimeEmptyString /*userMsg*/) {
+  torchCheckFail(func, file, line, condMsg);
+}
+[[noreturn]] C10_API void torchInternalAssertFail(
+    const char* func,
+    const char* file,
+    uint32_t line,
+    const char* condMsg,
+    const std::string& userMsg);
+
+} // namespace c10::detail
+
+#ifdef STRIP_ERROR_MESSAGES
+#define TORCH_CHECK(cond, ...)                   \
+  if (C10_UNLIKELY_OR_CONST(!(cond))) {          \
+    ::c10::detail::torchCheckFail(               \
+        __func__,                                \
+        __FILE__,                                \
+        static_cast<uint32_t>(__LINE__),         \
+        TORCH_CHECK_MSG(cond, "", __VA_ARGS__)); \
+  }
+#else
+#define TORCH_CHECK(cond, ...)                     \
+  if (C10_UNLIKELY_OR_CONST(!(cond))) {            \
+    ::c10::detail::torchCheckFail(                 \
+        __func__,                                  \
+        __FILE__,                                  \
+        static_cast<uint32_t>(__LINE__),           \
+        TORCH_CHECK_MSG(cond, "", ##__VA_ARGS__)); \
+  }
+#endif
+
+// A utility macro that does what `TORCH_CHECK` does if compiled in the host
+// code, otherwise does nothing. Supposed to be used in the code shared between
+// host and device code as an alternative for `TORCH_CHECK`.
+#if defined(__CUDACC__) || defined(__HIPCC__)
+#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...)
+#else
+#define TORCH_CHECK_IF_NOT_ON_CUDA(cond, ...) TORCH_CHECK(cond, ##__VA_ARGS__)
+#endif
+
+// Debug-only version of TORCH_INTERNAL_ASSERT. This macro only checks in debug
+// build, and does nothing in release build. It is appropriate to use
+// in situations where you want to add an assert to a hotpath, but it is
+// too expensive to run this assert on production builds.
+#ifdef NDEBUG
+// Optimized version - generates no code.
+#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
+  while (false)                               \
+  C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
+#else
+#define TORCH_INTERNAL_ASSERT_DEBUG_ONLY(...) \
+  C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__))
+#endif
+
+// TODO: We're going to get a lot of similar looking string literals
+// this way; check if this actually affects binary size.
+
+// Like TORCH_CHECK, but raises LinAlgError instead of Error.
+#define TORCH_CHECK_LINALG(cond, ...) \
+  TORCH_CHECK_WITH_MSG(LinAlgError, cond, "LINALG", __VA_ARGS__)
+
+// Like TORCH_CHECK, but raises IndexErrors instead of Errors.
+#define TORCH_CHECK_INDEX(cond, ...) \
+  TORCH_CHECK_WITH_MSG(IndexError, cond, "INDEX", __VA_ARGS__)
+
+// Like TORCH_CHECK, but raises ValueErrors instead of Errors.
+#define TORCH_CHECK_VALUE(cond, ...) \
+  TORCH_CHECK_WITH_MSG(ValueError, cond, "VALUE", __VA_ARGS__)
+
+// Like TORCH_CHECK, but raises TypeErrors instead of Errors.
+#define TORCH_CHECK_TYPE(cond, ...) \
+  TORCH_CHECK_WITH_MSG(TypeError, cond, "TYPE", __VA_ARGS__)
+
+// Like TORCH_CHECK, but raises NotImplementedErrors instead of Errors.
+#define TORCH_CHECK_NOT_IMPLEMENTED(cond, ...) \
+  TORCH_CHECK_WITH_MSG(NotImplementedError, cond, "TYPE", __VA_ARGS__)
+
+#define TORCH_CHECK_ALWAYS_SHOW_CPP_STACKTRACE(cond, ...) \
+  TORCH_CHECK_WITH_MSG(                                   \
+      ErrorAlwaysShowCppStacktrace, cond, "TYPE", ##__VA_ARGS__)
+
+#ifdef STRIP_ERROR_MESSAGES
+#define WARNING_MESSAGE_STRING(...) \
+  ::c10::detail::CompileTimeEmptyString {}
+#else
+#define WARNING_MESSAGE_STRING(...) ::c10::str(__VA_ARGS__)
+#endif
+
+// Report a warning to the user. Accepts an arbitrary number of extra
+// arguments which are concatenated into the warning message using operator<<
+//
+#ifdef DISABLE_WARN
+#define _TORCH_WARN_WITH(...) ((void)0);
+#else
+#define _TORCH_WARN_WITH(warning_t, ...)                     \
+  ::c10::warn(::c10::Warning(                                \
+      warning_t(),                                           \
+      {__func__, __FILE__, static_cast<uint32_t>(__LINE__)}, \
+      WARNING_MESSAGE_STRING(__VA_ARGS__),                   \
+      false));
+#endif
+
+#define TORCH_WARN(...) _TORCH_WARN_WITH(::c10::UserWarning, __VA_ARGS__);
+
+#define TORCH_WARN_DEPRECATION(...) \
+  _TORCH_WARN_WITH(::c10::DeprecationWarning, __VA_ARGS__);
+
+// Report a warning to the user only once. Accepts an arbitrary number of extra
+// arguments which are concatenated into the warning message using operator<<
+//
+#define _TORCH_WARN_ONCE(...)                                             \
+  C10_UNUSED static const auto C10_ANONYMOUS_VARIABLE(torch_warn_once_) = \
+      [&] {                                                               \
+        TORCH_WARN(__VA_ARGS__);                                          \
+        return true;                                                      \
+      }()
+
+#ifdef DISABLE_WARN
+#define TORCH_WARN_ONCE(...) ((void)0);
+#else
+#define TORCH_WARN_ONCE(...)                   \
+  if (::c10::WarningUtils::get_warnAlways()) { \
+    TORCH_WARN(__VA_ARGS__);                   \
+  } else {                                     \
+    _TORCH_WARN_ONCE(__VA_ARGS__);             \
+  }
+#endif
+
+// Report an error with a specific argument
+// NOTE: using the argument name in TORCH_CHECK's message is preferred
+#define TORCH_CHECK_ARG(cond, argN, ...) \
+  TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__)
+
+// ----------------------------------------------------------------------------
+// Deprecated macros
+// ----------------------------------------------------------------------------
+
+namespace c10::detail {
+
+/*
+// Deprecation disabled until we fix sites in our codebase
+C10_DEPRECATED_MESSAGE(
+    "AT_ERROR(msg) is deprecated, use TORCH_CHECK(false, msg) instead.")
+*/
+inline void deprecated_AT_ERROR() {}
+
+/*
+// Deprecation disabled until we fix sites in our codebase
+C10_DEPRECATED_MESSAGE(
+    "AT_ASSERT is deprecated, if you mean to indicate an internal invariant "
+    "failure, use TORCH_INTERNAL_ASSERT instead; if you mean to do user "
+    "error checking, use TORCH_CHECK. See "
+    "https://github.com/pytorch/pytorch/issues/20287 for more details.")
+*/
+inline void deprecated_AT_ASSERT() {}
+
+/*
+// Deprecation disabled until we fix sites in our codebase
+C10_DEPRECATED_MESSAGE(
+    "AT_ASSERTM is deprecated, if you mean to indicate an internal invariant "
+    "failure, use TORCH_INTERNAL_ASSERT instead; if you mean to do user "
+    "error checking, use TORCH_CHECK. See "
+    "https://github.com/pytorch/pytorch/issues/20287 for more details.")
+*/
+inline void deprecated_AT_ASSERTM() {}
+
+} // namespace c10::detail
+
+// Deprecated alias; this alias was deprecated because people kept mistakenly
+// using it for user error checking. Use TORCH_INTERNAL_ASSERT or TORCH_CHECK
+// instead. See https://github.com/pytorch/pytorch/issues/20287 for more
+// details.
+#define AT_ASSERT(...)                                              \
+  do {                                                              \
+    ::c10::detail::deprecated_AT_ASSERT();                          \
+    C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(__VA_ARGS__)); \
+  } while (false)
+
+// Deprecated alias, like AT_ASSERT. The new TORCH_INTERNAL_ASSERT macro
+// supports both 0-ary and variadic calls, so having a separate
+// message-accepting macro is not necessary.
+//
+// NB: we MUST include cond explicitly here, as MSVC will miscompile the macro
+// expansion, shunting all of __VA_ARGS__ to cond. An alternate workaround
+// can be seen at
+// https://stackoverflow.com/questions/5134523/msvc-doesnt-expand-va-args-correctly
+#define AT_ASSERTM(cond, ...)                                             \
+  do {                                                                    \
+    ::c10::detail::deprecated_AT_ASSERTM();                               \
+    C10_EXPAND_MSVC_WORKAROUND(TORCH_INTERNAL_ASSERT(cond, __VA_ARGS__)); \
+  } while (false)
+
+// Deprecated alias; this alias was deprecated because it represents extra API
+// surface that makes it hard for people to understand what macro to use.
+// Use TORCH_CHECK(false, ...) or TORCH_INTERNAL_ASSERT(false, ...) to
+// unconditionally fail at a line of code.
+#define AT_ERROR(...)                                                        \
+  do {                                                                       \
+    ::c10::detail::deprecated_AT_ERROR();                                    \
+    C10_EXPAND_MSVC_WORKAROUND(TORCH_CHECK(false, ::c10::str(__VA_ARGS__))); \
+  } while (false)
+
+#endif // C10_UTIL_EXCEPTION_H_
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h
new file mode 100644
index 0000000000000000000000000000000000000000..73ff45b8c38d8393c44bd7ba128bea91fa2dd943
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/ExclusivelyOwnedTensorTraits.h
@@ -0,0 +1,75 @@
+#pragma once
+
+#include <c10/core/TensorImpl.h>
+#include <c10/core/UndefinedTensorImpl.h>
+
+#include <utility>
+
+namespace c10 {
+// Shared ExclusivelyOwnedTraits implementation between caffe2::Tensor and
+// at::TensorBase.
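+//
+// Usage sketch (assumed, not part of the original header): these traits back
+// c10::ExclusivelyOwned from c10/util/ExclusivelyOwned.h, e.g.
+//
+//   c10::ExclusivelyOwned<at::Tensor> owned(std::move(tensor));
+//   at::Tensor& t = *owned; // borrow without refcount traffic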
+template <typename TensorType>
+struct ExclusivelyOwnedTensorTraits {
+  using repr_type = TensorType;
+  using pointer_type = TensorType*;
+  using const_pointer_type = const TensorType*;
+
+  static repr_type nullRepr() {
+    return TensorType();
+  }
+
+  template <class... Args>
+  static repr_type createInPlace(Args&&... args) {
+    return TensorType(std::forward<Args>(args)...);
+  }
+
+  static repr_type moveToRepr(TensorType&& x) {
+    return std::move(x);
+  }
+
+  static void destroyOwned(TensorType& x) {
+    TensorImpl* const toDestroy = x.unsafeReleaseTensorImpl();
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        toDestroy != nullptr, "Tensor somehow got null TensorImpl?");
+    // May be 0 because UndefinedTensorImpl doesn't get its refcount
+    // incremented.
+    const bool isUndefined = toDestroy == UndefinedTensorImpl::singleton();
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        toDestroy->refcount_ == 1 || (toDestroy->refcount_ == 0 && isUndefined),
+        "ExclusivelyOwned destroyed with isUndefined ",
+        isUndefined,
+        " and refcount ",
+        toDestroy->refcount_,
+        ", expected 1 or, if isUndefined, 0!");
+    TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+        toDestroy->weakcount_ == 1 ||
+            (toDestroy->weakcount_ == 0 &&
+             toDestroy == UndefinedTensorImpl::singleton()),
+        "ExclusivelyOwned destroyed with isUndefined ",
+        isUndefined,
+        " and weakcount ",
+        toDestroy->weakcount_,
+        ", expected 1 or, if isUndefined, 0!");
+    if (!isUndefined) {
+#ifndef NDEBUG
+      // Needed to pass the debug assertions in ~intrusive_ptr_target.
+      toDestroy->refcount_ = 0;
+      toDestroy->weakcount_ = 0;
+#endif
+      delete toDestroy;
+    }
+  }
+
+  static TensorType take(TensorType& x) {
+    return std::move(x);
+  }
+
+  static pointer_type getImpl(repr_type& x) {
+    return &x;
+  }
+
+  static const_pointer_type getImpl(const repr_type& x) {
+    return &x;
+  }
+};
+} // namespace c10
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2.h
new file mode 100644
index 0000000000000000000000000000000000000000..c05f974067a7bd8e8ec396544e5343e1e70ab70b
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2.h
@@ -0,0 +1,143 @@
+#pragma once
+
+/// Defines the Float8_e5m2 type (8-bit floating-point) including conversions
+/// to standard C types and basic arithmetic operations. Note that arithmetic
+/// operations are implemented by converting to floating point and
+/// performing the operation in float32.
+/// Binary configuration:
+/// s eeeee mm
+/// 1 sign bit
+/// 5 exponent bits
+/// 2 mantissa bits
+/// bias = 15
+///
+/// Implementation based on the paper https://arxiv.org/pdf/2209.05433.pdf
+/// and inspired by the Half implementation from pytorch/c10/util/Half.h
+
+#include <c10/util/Half.h>
+
+namespace c10 {
+
+namespace detail {
+
+/*
+ * Convert an 8-bit floating-point number in fp8 E5M2 format, in bit
+ * representation, to a 32-bit floating-point number in IEEE single-precision
+ * format, in bit representation.
+ *
+ * @note The implementation doesn't use any floating-point operations.
+ */
+inline C10_HOST_DEVICE float fp8e5m2_to_fp32_value(uint8_t input) {
+  /*
+   * Extend the fp8 E5M2 number to 32 bits and shift to the
+   * upper part of the 32-bit word:
+   *      +---+-----+--+-----------------------------+
+   *      | S |EEEEE|MM|0000 0000 0000 0000 0000 0000|
+   *      +---+-----+--+-----------------------------+
+   * Bits  31  26-30 24-25            0-23
+   *
+   * S - sign bit, E - bits of the biased exponent, M - bits of the mantissa,
+   * 0 - zero bits.
+   */
+  uint16_t half_representation = input;
+  half_representation <<= 8;
+  return fp16_ieee_to_fp32_value(half_representation);
+}
+
+/*
+ * Convert a 32-bit floating-point number in IEEE single-precision format to an
+ * 8-bit floating-point number in fp8 E5M2 format, in bit representation.
+ */
+inline C10_HOST_DEVICE uint8_t fp8e5m2_from_fp32_value(float f) {
+  /*
+   * Binary representation of fp32 infinity
+   * 0 11111111 00000000000000000000000
+   */
+  constexpr uint32_t fp32_inf = UINT32_C(255) << 23;
+
+  /*
+   * Binary representation of 65536.0f, which is the first value
+   * not representable in fp8e5m2 range:
+   * 0 11111 00 - fp8e5m2
+   * 0 10001111 00000000000000000000000 - fp32
+   */
+  constexpr uint32_t fp8_max = UINT32_C(143) << 23;
+
+  /*
+   * A mask for converting fp32 numbers lower than fp8e5m2 normal range
+   * into denorm representation
+   * magic number: ((127 - 15) + (23 - 2) + 1)
+   */
+  constexpr uint32_t denorm_mask = UINT32_C(134) << 23;
+
+  uint32_t f_bits = fp32_to_bits(f);
+  uint8_t result = 0u;
+
+  /*
+   * Extract the sign of the input number into the high bit of the 32-bit word:
+   *
+   *      +---+----------------------------------+
+   *      | S |0000000 00000000 00000000 00000000|
+   *      +---+----------------------------------+
+   * Bits  31                 0-31
+   */
+  const uint32_t sign = f_bits & UINT32_C(0x80000000);
+
+  /*
+   * Set sign bit to 0
+   */
+  f_bits ^= sign;
+
+  if (f_bits >= fp8_max) {
+    // NaN - all exponent and mantissa bits set to 1
+    result = f_bits > fp32_inf ? UINT8_C(0x7F) : UINT8_C(0x7C);
+  } else {
+    if (f_bits < (UINT32_C(113) << 23)) {
+      // Input number is smaller than 2^(-14), which is the smallest
+      // fp8e5m2 normal number
+      f_bits =
+          fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
+      result = static_cast<uint8_t>(f_bits - denorm_mask);
+    } else {
+      // resulting mantissa is odd
+      uint32_t mant_odd = (f_bits >> 21) & 1;
+
+      // update exponent, rounding bias part 1
+      f_bits += ((uint32_t)(15 - 127) << 23) + 0xFFFFF;
+
+      // rounding bias part 2
+      f_bits += mant_odd;
+
+      // take the bits!
+      result = static_cast<uint8_t>(f_bits >> 21);
+    }
+  }
+
+  result |= static_cast<uint8_t>(sign >> 24);
+  return result;
+}
+
+} // namespace detail
+
+struct alignas(1) Float8_e5m2 {
+  uint8_t x;
+
+  struct from_bits_t {};
+  C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
+    return from_bits_t();
+  }
+
+  Float8_e5m2() = default;
+
+  constexpr C10_HOST_DEVICE Float8_e5m2(uint8_t bits, from_bits_t) : x(bits) {}
+  inline C10_HOST_DEVICE Float8_e5m2(float value);
+  inline C10_HOST_DEVICE operator float() const;
+  inline C10_HOST_DEVICE bool isnan() const;
+  inline C10_HOST_DEVICE bool isinf() const;
+};
+
+C10_API std::ostream& operator<<(std::ostream& out, const Float8_e5m2& value);
+
+} // namespace c10
+
+#include <c10/util/Float8_e5m2-inl.h> // IWYU pragma: keep
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h
new file mode 100644
index 0000000000000000000000000000000000000000..f43a912c01bdebf9028438e8f0c69cc910f06ee5
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Float8_e5m2fnuz.h
@@ -0,0 +1,135 @@
+#pragma once
+
+/// Defines the Float8_e5m2fnuz type (8-bit floating-point) including
+/// conversions to standard C types and basic arithmetic operations. Note that
+/// arithmetic operations are implemented by converting to floating point and
+/// performing the operation in float32.
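+/// (Concretely, the differences listed below mean that the byte 0x04 decodes
+/// as 2^-15 under this type's bias of 16, versus 2^-14 under e5m2's bias of
+/// 15, and that 0x80, which encodes -0.0 in e5m2, is the unique NaN here.)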
+/// Binary configuration remains the same as e5m2:
+/// s eeeee mm
+/// 1 sign bit
+/// 5 exponent bits
+/// 2 mantissa bits
+/// The key differences that e5m2fnuz brings are:
+/// bias = 16
+/// no infinities or negative zero
+/// NaN only when sign bit is 1, rest all 0s
+///
+/// Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and
+/// the existing Float8_e4m3fn implementation.
+
+#include <c10/macros/Macros.h>
+#include <c10/util/TypeSafeSignMath.h>
+#include <c10/util/floating_point_utils.h>
+
+#if defined(__cplusplus) && (__cplusplus >= 201103L)
+#include <cstdint>
+#elif !defined(__OPENCL_VERSION__)
+#include <math.h>
+#include <stdint.h>
+#endif
+
+#include <iosfwd>
+#include <ostream>
+
+namespace c10 {
+
+namespace detail {
+
+/*
+ * Convert a 32-bit floating-point number in IEEE single-precision format to an
+ * 8-bit floating-point number in fp8 E5M2FNUZ format, in bit representation.
+ */
+inline C10_HOST_DEVICE uint8_t fp8e5m2fnuz_from_fp32_value(float f) {
+  /*
+   * Binary representation of 65536.0f, which is the first value not
+   * representable (i.e. the first value which would overflow into the sign
+   * bit, resulting in a NaN) in fp8e5m2fnuz range:
+   * 1 00000 00 - fp8e5m2fnuz
+   * 0 10001111 00000000000000000000000 - fp32
+   */
+  constexpr uint32_t fnuz_max = UINT32_C(0x8F) << 23;
+
+  /*
+   * A mask for converting fp32 numbers lower than fp8e5m2fnuz normal range
+   * into denormalized representation.
+   * magic number: ((127 - 16) + (23 - 2) + 1)
+   */
+  constexpr uint32_t denorm_mask = UINT32_C(0x85) << 23;
+
+  uint32_t f_bits = fp32_to_bits(f);
+  uint32_t result = 0u;
+
+  /*
+   * Extract the sign of the input number into the high bit of the 32-bit word:
+   *
+   *      +---+----------------------------------+
+   *      | S |0000000 00000000 00000000 00000000|
+   *      +---+----------------------------------+
+   * Bits  31                 0-31
+   */
+  const uint32_t sign = f_bits & UINT32_C(0x80000000);
+
+  /*
+   * Set sign bit to 0
+   */
+  f_bits ^= sign;
+
+  if (f_bits >= fnuz_max) {
+    // NaN -- sign bit set to 1, rest 0s
+    return 0x80;
+  }
+
+  if (f_bits < (UINT32_C(0x70) << 23) /* 2^-15 in float32 */) {
+    // Input exponent is less than -15, the smallest e5m2fnuz exponent, so the
+    // number will become subnormal.
+    f_bits = fp32_to_bits(fp32_from_bits(f_bits) + fp32_from_bits(denorm_mask));
+    result = static_cast<uint8_t>(f_bits - denorm_mask);
+    if (result == 0) {
+      // fnuz types don't have negative zero.
+      return 0;
+    }
+  } else {
+    // resulting mantissa is odd
+    uint8_t mant_odd = (f_bits >> 21) & 1;
+
+    // update exponent, rounding bias part 1
+    f_bits += ((uint32_t)(16 - 127) << 23) + 0xFFFFF;
+
+    // rounding bias part 2
+    f_bits += mant_odd;
+
+    // take the bits!
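+    // (The two additions above implement round-to-nearest-even: 0xFFFFF is
+    // one less than half the weight of the 21 discarded bits, and adding the
+    // mantissa's own LSB, mant_odd, breaks exact ties toward an even result.)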
+    result = static_cast<uint8_t>(f_bits >> 21);
+  }
+
+  result |= sign >> 24;
+  return result;
+}
+
+} // namespace detail
+
+struct alignas(1) Float8_e5m2fnuz {
+  uint8_t x;
+
+  struct from_bits_t {};
+  C10_HOST_DEVICE static constexpr from_bits_t from_bits() {
+    return from_bits_t();
+  }
+
+  Float8_e5m2fnuz() = default;
+
+  constexpr C10_HOST_DEVICE Float8_e5m2fnuz(uint8_t bits, from_bits_t)
+      : x(bits) {}
+  inline C10_HOST_DEVICE Float8_e5m2fnuz(float value);
+  inline C10_HOST_DEVICE operator float() const;
+  inline C10_HOST_DEVICE bool isnan() const;
+  inline C10_HOST_DEVICE bool isinf() const;
+};
+
+C10_API std::ostream& operator<<(
+    std::ostream& out,
+    const Float8_e5m2fnuz& value);
+
+} // namespace c10
+
+#include <c10/util/Float8_e5m2fnuz-inl.h> // IWYU pragma: keep
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h
new file mode 100644
index 0000000000000000000000000000000000000000..1426ee9362ae9f8a8aa361c5de1edaba0fdcb8a7
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/IdWrapper.h
@@ -0,0 +1,77 @@
+#pragma once
+
+#include <cstddef>
+#include <functional>
+#include <utility>
+
+namespace c10 {
+
+/**
+ * This template simplifies generation of simple classes that wrap an id
+ * in a typesafe way. Namely, you can use it to create a very lightweight
+ * type that only offers equality comparators and hashing. Example:
+ *
+ *   struct MyIdType final : IdWrapper<MyIdType, uint32_t> {
+ *     constexpr explicit MyIdType(uint32_t id): IdWrapper(id) {}
+ *   };
+ *
+ * Then in the global top level namespace:
+ *
+ *   C10_DEFINE_HASH_FOR_IDWRAPPER(MyIdType);
+ *
+ * That's it - equality operators and hash functions are automatically defined
+ * for you, given the underlying type supports it.
+ */
+template <class ConcreteType, class UnderlyingType>
+class IdWrapper {
+ public:
+  using underlying_type = UnderlyingType;
+  using concrete_type = ConcreteType;
+
+ protected:
+  constexpr explicit IdWrapper(underlying_type id) noexcept(
+      noexcept(underlying_type(std::declval<underlying_type>())))
+      : id_(id) {}
+
+  constexpr underlying_type underlyingId() const
+      noexcept(noexcept(underlying_type(std::declval<underlying_type>()))) {
+    return id_;
+  }
+
+ private:
+  friend size_t hash_value(const concrete_type& v) {
+    return std::hash<underlying_type>()(v.id_);
+  }
+
+  // TODO Making operator== noexcept if underlying type is noexcept equality
+  // comparable doesn't work with GCC 4.8.
+  // Fix this once we don't need GCC 4.8 anymore.
+  friend constexpr bool operator==(
+      const concrete_type& lhs,
+      const concrete_type& rhs) noexcept {
+    return lhs.id_ == rhs.id_;
+  }
+
+  // TODO Making operator!= noexcept if operator== is noexcept doesn't work with
+  // GCC 4.8.
+  // Fix this once we don't need GCC 4.8 anymore.
+ friend constexpr bool operator!=( + const concrete_type& lhs, + const concrete_type& rhs) noexcept { + return !(lhs == rhs); + } + + underlying_type id_; +}; + +} // namespace c10 + +#define C10_DEFINE_HASH_FOR_IDWRAPPER(ClassName) \ + namespace std { \ + template <> \ + struct hash { \ + size_t operator()(ClassName x) const { \ + return hash_value(x); \ + } \ + }; \ + } diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h new file mode 100644 index 0000000000000000000000000000000000000000..58145b2c779cc38113c1916dec01f5fe8a75ef2e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/LeftRight.h @@ -0,0 +1,223 @@ +#include +#include +#include +#include +#include +#include + +namespace c10 { + +namespace detail { + +struct IncrementRAII final { + public: + explicit IncrementRAII(std::atomic* counter) : _counter(counter) { + _counter->fetch_add(1); + } + + ~IncrementRAII() { + _counter->fetch_sub(1); + } + + private: + std::atomic* _counter; + + C10_DISABLE_COPY_AND_ASSIGN(IncrementRAII); +}; + +} // namespace detail + +// LeftRight wait-free readers synchronization primitive +// https://hal.archives-ouvertes.fr/hal-01207881/document +// +// LeftRight is quite easy to use (it can make an arbitrary +// data structure permit wait-free reads), but it has some +// particular performance characteristics you should be aware +// of if you're deciding to use it: +// +// - Reads still incur an atomic write (this is how LeftRight +// keeps track of how long it needs to keep around the old +// data structure) +// +// - Writes get executed twice, to keep both the left and right +// versions up to date. So if your write is expensive or +// nondeterministic, this is also an inappropriate structure +// +// LeftRight is used fairly rarely in PyTorch's codebase. If you +// are still not sure if you need it or not, consult your local +// C++ expert. +// +template +class LeftRight final { + public: + template + explicit LeftRight(const Args&... args) + : _counters{{{0}, {0}}}, + _foregroundCounterIndex(0), + _foregroundDataIndex(0), + _data{{T{args...}, T{args...}}}, + _writeMutex() {} + + // Copying and moving would not be threadsafe. + // Needs more thought and careful design to make that work. + LeftRight(const LeftRight&) = delete; + LeftRight(LeftRight&&) noexcept = delete; + LeftRight& operator=(const LeftRight&) = delete; + LeftRight& operator=(LeftRight&&) noexcept = delete; + + ~LeftRight() { + // wait until any potentially running writers are finished + { std::unique_lock lock(_writeMutex); } + + // wait until any potentially running readers are finished + while (_counters[0].load() != 0 || _counters[1].load() != 0) { + std::this_thread::yield(); + } + } + + template + auto read(F&& readFunc) const { + detail::IncrementRAII _increment_counter( + &_counters[_foregroundCounterIndex.load()]); + + return std::forward(readFunc)(_data[_foregroundDataIndex.load()]); + } + + // Throwing an exception in writeFunc is ok but causes the state to be either + // the old or the new state, depending on if the first or the second call to + // writeFunc threw. + template + auto write(F&& writeFunc) { + std::unique_lock lock(_writeMutex); + + return _write(std::forward(writeFunc)); + } + + private: + template + auto _write(const F& writeFunc) { + /* + * Assume, A is in background and B in foreground. In simplified terms, we + * want to do the following: + * 1. 
Write to A (old background) + * 2. Switch A/B + * 3. Write to B (new background) + * + * More detailed algorithm (explanations on why this is important are below + * in code): + * 1. Write to A + * 2. Switch A/B data pointers + * 3. Wait until A counter is zero + * 4. Switch A/B counters + * 5. Wait until B counter is zero + * 6. Write to B + */ + + auto localDataIndex = _foregroundDataIndex.load(); + + // 1. Write to A + _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex); + + // 2. Switch A/B data pointers + localDataIndex = localDataIndex ^ 1; + _foregroundDataIndex = localDataIndex; + + /* + * 3. Wait until A counter is zero + * + * In the previous write run, A was foreground and B was background. + * There was a time after switching _foregroundDataIndex (B to foreground) + * and before switching _foregroundCounterIndex, in which new readers could + * have read B but incremented A's counter. + * + * In this current run, we just switched _foregroundDataIndex (A back to + * foreground), but before writing to the new background B, we have to make + * sure A's counter was zero briefly, so all these old readers are gone. + */ + auto localCounterIndex = _foregroundCounterIndex.load(); + _waitForBackgroundCounterToBeZero(localCounterIndex); + + /* + * 4. Switch A/B counters + * + * Now that we know all readers on B are really gone, we can switch the + * counters and have new readers increment A's counter again, which is the + * correct counter since they're reading A. + */ + localCounterIndex = localCounterIndex ^ 1; + _foregroundCounterIndex = localCounterIndex; + + /* + * 5. Wait until B counter is zero + * + * This waits for all the readers on B that came in while both data and + * counter for B was in foreground, i.e. normal readers that happened + * outside of that brief gap between switching data and counter. + */ + _waitForBackgroundCounterToBeZero(localCounterIndex); + + // 6. Write to B + return _callWriteFuncOnBackgroundInstance(writeFunc, localDataIndex); + } + + template + auto _callWriteFuncOnBackgroundInstance( + const F& writeFunc, + uint8_t localDataIndex) { + try { + return writeFunc(_data[localDataIndex ^ 1]); + } catch (...) { + // recover invariant by copying from the foreground instance + _data[localDataIndex ^ 1] = _data[localDataIndex]; + // rethrow + throw; + } + } + + void _waitForBackgroundCounterToBeZero(uint8_t counterIndex) { + while (_counters[counterIndex ^ 1].load() != 0) { + std::this_thread::yield(); + } + } + + mutable std::array, 2> _counters; + std::atomic _foregroundCounterIndex; + std::atomic _foregroundDataIndex; + std::array _data; + std::mutex _writeMutex; +}; + +// RWSafeLeftRightWrapper is API compatible with LeftRight and uses a +// read-write lock to protect T (data). +template +class RWSafeLeftRightWrapper final { + public: + template + explicit RWSafeLeftRightWrapper(const Args&... args) : data_{args...} {} + + // RWSafeLeftRightWrapper is not copyable or moveable since LeftRight + // is not copyable or moveable. 
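+  //
+  // Illustrative usage, identical for LeftRight and this wrapper (the
+  // std::map payload is just an example, not something this header mandates):
+  //
+  //   c10::LeftRight<std::map<int, int>> lr;
+  //   lr.write([](std::map<int, int>& m) { m[1] = 2; });
+  //   int v = lr.read([](const std::map<int, int>& m) { return m.at(1); });
+  //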
+ RWSafeLeftRightWrapper(const RWSafeLeftRightWrapper&) = delete; + RWSafeLeftRightWrapper(RWSafeLeftRightWrapper&&) noexcept = delete; + RWSafeLeftRightWrapper& operator=(const RWSafeLeftRightWrapper&) = delete; + RWSafeLeftRightWrapper& operator=(RWSafeLeftRightWrapper&&) noexcept = delete; + + template + // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward) + auto read(F&& readFunc) const { + return data_.withLock( + [&readFunc](T const& data) { return std::forward(readFunc)(data); }); + } + + template + // NOLINTNEXTLINE(cppcoreguidelines-missing-std-forward) + auto write(F&& writeFunc) { + return data_.withLock( + [&writeFunc](T& data) { return std::forward(writeFunc)(data); }); + } + + private: + c10::Synchronized data_; +}; + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Optional.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Optional.h new file mode 100644 index 0000000000000000000000000000000000000000..70fa29e6f3914e01aaa489bfd9259b88066fb38b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Optional.h @@ -0,0 +1,48 @@ +#ifndef C10_UTIL_OPTIONAL_H_ +#define C10_UTIL_OPTIONAL_H_ + +#include +#include + +// Macros.h is not needed, but it does namespace shenanigans that lots +// of downstream code seems to rely on. Feel free to remove it and fix +// up builds. + +namespace c10 { +// NOLINTNEXTLINE(misc-unused-using-decls) +using std::bad_optional_access; +// NOLINTNEXTLINE(misc-unused-using-decls) +using std::make_optional; +// NOLINTNEXTLINE(misc-unused-using-decls) +using std::nullopt; +// NOLINTNEXTLINE(misc-unused-using-decls) +using std::nullopt_t; +// NOLINTNEXTLINE(misc-unused-using-decls) +using std::optional; + +namespace detail_ { +// the call to convert(b) has return type A and converts b to type A iff b +// decltype(b) is implicitly convertible to A +template +constexpr U convert(U v) { + return v; +} +} // namespace detail_ +template +constexpr T value_or_else(const optional& v, F&& func) { + static_assert( + std::is_convertible_v, T>, + "func parameters must be a callable that returns a type convertible to the value stored in the optional"); + return v.has_value() ? *v : detail_::convert(std::forward(func)()); +} + +template +constexpr T value_or_else(optional&& v, F&& func) { + static_assert( + std::is_convertible_v, T>, + "func parameters must be a callable that returns a type convertible to the value stored in the optional"); + return v.has_value() ? constexpr_move(std::move(v).contained_val()) + : detail_::convert(std::forward(func)()); +} +} // namespace c10 +#endif // C10_UTIL_OPTIONAL_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h new file mode 100644 index 0000000000000000000000000000000000000000..2c2b88722d4d782482646a4e9ae01638886890ab --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/OptionalArrayRef.h @@ -0,0 +1,236 @@ +// This file defines OptionalArrayRef, a class that has almost the same +// exact functionality as c10::optional>, except that its +// converting constructor fixes a dangling pointer issue. +// +// The implicit converting constructor of both c10::optional> and +// std::optional> can cause the underlying ArrayRef to store +// a dangling pointer. OptionalArrayRef prevents this by wrapping +// a c10::optional> and fixing the constructor implementation. 
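+//
+// Illustrative hazard (hypothetical snippet, not from this header): with
+//
+//   std::optional<c10::ArrayRef<int>> opt = std::vector<int>{1, 2, 3};
+//
+// the converting constructor builds the ArrayRef as a view of a temporary
+// vector that dies at the end of the statement, so opt silently dangles.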
+// +// See https://github.com/pytorch/pytorch/issues/63645 for more on this. + +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace c10 { + +template +class OptionalArrayRef final { + public: + // Constructors + + constexpr OptionalArrayRef() noexcept = default; + + constexpr OptionalArrayRef(nullopt_t) noexcept {} + + OptionalArrayRef(const OptionalArrayRef& other) = default; + + OptionalArrayRef(OptionalArrayRef&& other) noexcept = default; + + constexpr OptionalArrayRef(const optional>& other) noexcept + : wrapped_opt_array_ref(other) {} + + constexpr OptionalArrayRef(optional>&& other) noexcept + : wrapped_opt_array_ref(std::move(other)) {} + + constexpr OptionalArrayRef(const T& value) noexcept + : wrapped_opt_array_ref(value) {} + + template < + typename U = ArrayRef, + std::enable_if_t< + !std::is_same_v, OptionalArrayRef> && + !std::is_same_v, std::in_place_t> && + std::is_constructible_v, U&&> && + std::is_convertible_v> && + !std::is_convertible_v, + bool> = false> + constexpr OptionalArrayRef(U&& value) noexcept( + std::is_nothrow_constructible_v, U&&>) + : wrapped_opt_array_ref(std::forward(value)) {} + + template < + typename U = ArrayRef, + std::enable_if_t< + !std::is_same_v, OptionalArrayRef> && + !std::is_same_v, std::in_place_t> && + std::is_constructible_v, U&&> && + !std::is_convertible_v>, + bool> = false> + constexpr explicit OptionalArrayRef(U&& value) noexcept( + std::is_nothrow_constructible_v, U&&>) + : wrapped_opt_array_ref(std::forward(value)) {} + + template + constexpr explicit OptionalArrayRef( + std::in_place_t ip, + Args&&... args) noexcept + : wrapped_opt_array_ref(ip, std::forward(args)...) {} + + template + constexpr explicit OptionalArrayRef( + std::in_place_t ip, + std::initializer_list il, + Args&&... args) + : wrapped_opt_array_ref(ip, il, std::forward(args)...) 
{} + + constexpr OptionalArrayRef(const std::initializer_list& Vec) + : wrapped_opt_array_ref(ArrayRef(Vec)) {} + + // Destructor + + ~OptionalArrayRef() = default; + + // Assignment + + constexpr OptionalArrayRef& operator=(nullopt_t) noexcept { + wrapped_opt_array_ref = c10::nullopt; + return *this; + } + + OptionalArrayRef& operator=(const OptionalArrayRef& other) = default; + + OptionalArrayRef& operator=(OptionalArrayRef&& other) noexcept = default; + + constexpr OptionalArrayRef& operator=( + const optional>& other) noexcept { + wrapped_opt_array_ref = other; + return *this; + } + + constexpr OptionalArrayRef& operator=( + optional>&& other) noexcept { + wrapped_opt_array_ref = std::move(other); + return *this; + } + + template < + typename U = ArrayRef, + typename = std::enable_if_t< + !std::is_same_v, OptionalArrayRef> && + std::is_constructible_v, U&&> && + std::is_assignable_v&, U&&>>> + constexpr OptionalArrayRef& operator=(U&& value) noexcept( + std::is_nothrow_constructible_v, U&&> && + std::is_nothrow_assignable_v&, U&&>) { + wrapped_opt_array_ref = std::forward(value); + return *this; + } + + // Observers + + constexpr ArrayRef* operator->() noexcept { + return &wrapped_opt_array_ref.value(); + } + + constexpr const ArrayRef* operator->() const noexcept { + return &wrapped_opt_array_ref.value(); + } + + constexpr ArrayRef& operator*() & noexcept { + return wrapped_opt_array_ref.value(); + } + + constexpr const ArrayRef& operator*() const& noexcept { + return wrapped_opt_array_ref.value(); + } + + constexpr ArrayRef&& operator*() && noexcept { + return std::move(wrapped_opt_array_ref.value()); + } + + constexpr const ArrayRef&& operator*() const&& noexcept { + return std::move(wrapped_opt_array_ref.value()); + } + + constexpr explicit operator bool() const noexcept { + return wrapped_opt_array_ref.has_value(); + } + + constexpr bool has_value() const noexcept { + return wrapped_opt_array_ref.has_value(); + } + + constexpr ArrayRef& value() & { + return wrapped_opt_array_ref.value(); + } + + constexpr const ArrayRef& value() const& { + return wrapped_opt_array_ref.value(); + } + + constexpr ArrayRef&& value() && { + return std::move(wrapped_opt_array_ref.value()); + } + + constexpr const ArrayRef&& value() const&& { + return std::move(wrapped_opt_array_ref.value()); + } + + template + constexpr std:: + enable_if_t>, ArrayRef> + value_or(U&& default_value) const& { + return wrapped_opt_array_ref.value_or(std::forward(default_value)); + } + + template + constexpr std:: + enable_if_t>, ArrayRef> + value_or(U&& default_value) && { + return wrapped_opt_array_ref.value_or(std::forward(default_value)); + } + + // Modifiers + + constexpr void swap(OptionalArrayRef& other) noexcept { + std::swap(wrapped_opt_array_ref, other.wrapped_opt_array_ref); + } + + constexpr void reset() noexcept { + wrapped_opt_array_ref.reset(); + } + + template + constexpr std:: + enable_if_t, Args&&...>, ArrayRef&> + emplace(Args&&... args) noexcept( + std::is_nothrow_constructible_v, Args&&...>) { + return wrapped_opt_array_ref.emplace(std::forward(args)...); + } + + template + constexpr ArrayRef& emplace( + std::initializer_list il, + Args&&... 
args) noexcept { + return wrapped_opt_array_ref.emplace(il, std::forward(args)...); + } + + private: + optional> wrapped_opt_array_ref; +}; + +using OptionalIntArrayRef = OptionalArrayRef; + +inline bool operator==( + const OptionalIntArrayRef& a1, + const IntArrayRef& other) { + if (!a1.has_value()) { + return false; + } + return a1.value() == other; +} + +inline bool operator==( + const c10::IntArrayRef& a1, + const c10::OptionalIntArrayRef& a2) { + return a2 == a1; +} + +} // namespace c10 diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Registry.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Registry.h new file mode 100644 index 0000000000000000000000000000000000000000..3dd3ec54fd9753e8b4be792b49cde105940fb901 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Registry.h @@ -0,0 +1,326 @@ +#ifndef C10_UTIL_REGISTRY_H_ +#define C10_UTIL_REGISTRY_H_ + +/** + * Simple registry implementation that uses static variables to + * register object creators during program initialization time. + */ + +// NB: This Registry works poorly when you have other namespaces. +// Make all macro invocations from inside the at namespace. + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace c10 { + +template +inline std::string KeyStrRepr(const KeyType& /*key*/) { + return "[key type printing not supported]"; +} + +template <> +inline std::string KeyStrRepr(const std::string& key) { + return key; +} + +enum RegistryPriority { + REGISTRY_FALLBACK = 1, + REGISTRY_DEFAULT = 2, + REGISTRY_PREFERRED = 3, +}; + +/** + * @brief A template class that allows one to register classes by keys. + * + * The keys are usually a std::string specifying the name, but can be anything + * that can be used in a std::map. + * + * You should most likely not use the Registry class explicitly, but use the + * helper macros below to declare specific registries as well as registering + * objects. + */ +template +class Registry { + public: + typedef std::function Creator; + + Registry(bool warning = true) : registry_(), priority_(), warning_(warning) {} + + void Register( + const SrcType& key, + Creator creator, + const RegistryPriority priority = REGISTRY_DEFAULT) { + std::lock_guard lock(register_mutex_); + // The if statement below is essentially the same as the following line: + // TORCH_CHECK_EQ(registry_.count(key), 0) << "Key " << key + // << " registered twice."; + // However, TORCH_CHECK_EQ depends on google logging, and since registration + // is carried out at static initialization time, we do not want to have an + // explicit dependency on glog's initialization function. 
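+    // Priority resolves clashes below: a higher-priority registration
+    // overwrites, an equal-priority duplicate is an error, and a
+    // lower-priority one is skipped (with an optional warning).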
+ if (registry_.count(key) != 0) { + auto cur_priority = priority_[key]; + if (priority > cur_priority) { +#ifdef DEBUG + std::string warn_msg = + "Overwriting already registered item for key " + KeyStrRepr(key); + fprintf(stderr, "%s\n", warn_msg.c_str()); +#endif + registry_[key] = creator; + priority_[key] = priority; + } else if (priority == cur_priority) { + std::string err_msg = + "Key already registered with the same priority: " + KeyStrRepr(key); + fprintf(stderr, "%s\n", err_msg.c_str()); + if (terminate_) { + std::exit(1); + } else { + throw std::runtime_error(err_msg); + } + } else if (warning_) { + std::string warn_msg = + "Higher priority item already registered, skipping registration of " + + KeyStrRepr(key); + fprintf(stderr, "%s\n", warn_msg.c_str()); + } + } else { + registry_[key] = creator; + priority_[key] = priority; + } + } + + void Register( + const SrcType& key, + Creator creator, + const std::string& help_msg, + const RegistryPriority priority = REGISTRY_DEFAULT) { + Register(key, creator, priority); + help_message_[key] = help_msg; + } + + inline bool Has(const SrcType& key) { + return (registry_.count(key) != 0); + } + + ObjectPtrType Create(const SrcType& key, Args... args) { + auto it = registry_.find(key); + if (it == registry_.end()) { + // Returns nullptr if the key is not registered. + return nullptr; + } + return it->second(args...); + } + + /** + * Returns the keys currently registered as a std::vector. + */ + std::vector Keys() const { + std::vector keys; + keys.reserve(registry_.size()); + for (const auto& it : registry_) { + keys.push_back(it.first); + } + return keys; + } + + inline const std::unordered_map& HelpMessage() const { + return help_message_; + } + + const char* HelpMessage(const SrcType& key) const { + auto it = help_message_.find(key); + if (it == help_message_.end()) { + return nullptr; + } + return it->second.c_str(); + } + + // Used for testing, if terminate is unset, Registry throws instead of + // calling std::exit + void SetTerminate(bool terminate) { + terminate_ = terminate; + } + + private: + std::unordered_map registry_; + std::unordered_map priority_; + bool terminate_{true}; + const bool warning_; + std::unordered_map help_message_; + std::mutex register_mutex_; + + C10_DISABLE_COPY_AND_ASSIGN(Registry); +}; + +template +class Registerer { + public: + explicit Registerer( + const SrcType& key, + Registry* registry, + typename Registry::Creator creator, + const std::string& help_msg = "") { + registry->Register(key, creator, help_msg); + } + + explicit Registerer( + const SrcType& key, + const RegistryPriority priority, + Registry* registry, + typename Registry::Creator creator, + const std::string& help_msg = "") { + registry->Register(key, creator, help_msg, priority); + } + + template + static ObjectPtrType DefaultCreator(Args... args) { + return ObjectPtrType(new DerivedType(args...)); + } +}; + +/** + * C10_DECLARE_TYPED_REGISTRY is a macro that expands to a function + * declaration, as well as creating a convenient typename for its corresponding + * registerer. + */ +// Note on C10_IMPORT and C10_EXPORT below: we need to explicitly mark DECLARE +// as import and DEFINE as export, because these registry macros will be used +// in downstream shared libraries as well, and one cannot use *_API - the API +// macro will be defined on a per-shared-library basis. 
Semantically, when one +// declares a typed registry it is always going to be IMPORT, and when one +// defines a registry (which should happen ONLY ONCE and ONLY IN SOURCE FILE), +// the instantiation unit is always going to be exported. +// +// The only unique condition is when in the same file one does DECLARE and +// DEFINE - in Windows compilers, this generates a warning that dllimport and +// dllexport are mixed, but the warning is fine and linker will be properly +// exporting the symbol. Same thing happens in the gflags flag declaration and +// definition caes. +#define C10_DECLARE_TYPED_REGISTRY( \ + RegistryName, SrcType, ObjectType, PtrType, ...) \ + C10_API ::c10::Registry, ##__VA_ARGS__>* \ + RegistryName(); \ + typedef ::c10::Registerer, ##__VA_ARGS__> \ + Registerer##RegistryName + +#define TORCH_DECLARE_TYPED_REGISTRY( \ + RegistryName, SrcType, ObjectType, PtrType, ...) \ + TORCH_API ::c10::Registry, ##__VA_ARGS__>* \ + RegistryName(); \ + typedef ::c10::Registerer, ##__VA_ARGS__> \ + Registerer##RegistryName + +#define C10_DEFINE_TYPED_REGISTRY( \ + RegistryName, SrcType, ObjectType, PtrType, ...) \ + C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* \ + RegistryName() { \ + static ::c10::Registry, ##__VA_ARGS__>* \ + registry = new ::c10:: \ + Registry, ##__VA_ARGS__>(); \ + return registry; \ + } + +#define C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \ + RegistryName, SrcType, ObjectType, PtrType, ...) \ + C10_EXPORT ::c10::Registry, ##__VA_ARGS__>* \ + RegistryName() { \ + static ::c10::Registry, ##__VA_ARGS__>* \ + registry = \ + new ::c10::Registry, ##__VA_ARGS__>( \ + false); \ + return registry; \ + } + +// Note(Yangqing): The __VA_ARGS__ below allows one to specify a templated +// creator with comma in its templated arguments. +#define C10_REGISTER_TYPED_CREATOR(RegistryName, key, ...) \ + static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \ + key, RegistryName(), ##__VA_ARGS__); + +#define C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( \ + RegistryName, key, priority, ...) \ + static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \ + key, priority, RegistryName(), ##__VA_ARGS__); + +#define C10_REGISTER_TYPED_CLASS(RegistryName, key, ...) \ + static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \ + key, \ + RegistryName(), \ + Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, \ + ::c10::demangle_type<__VA_ARGS__>()); + +#define C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( \ + RegistryName, key, priority, ...) \ + static Registerer##RegistryName C10_ANONYMOUS_VARIABLE(g_##RegistryName)( \ + key, \ + priority, \ + RegistryName(), \ + Registerer##RegistryName::DefaultCreator<__VA_ARGS__>, \ + ::c10::demangle_type<__VA_ARGS__>()); + +// C10_DECLARE_REGISTRY and C10_DEFINE_REGISTRY are hard-wired to use +// std::string as the key type, because that is the most commonly used cases. +#define C10_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \ + C10_DECLARE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +#define TORCH_DECLARE_REGISTRY(RegistryName, ObjectType, ...) \ + TORCH_DECLARE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +#define C10_DEFINE_REGISTRY(RegistryName, ObjectType, ...) \ + C10_DEFINE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +#define C10_DEFINE_REGISTRY_WITHOUT_WARNING(RegistryName, ObjectType, ...) 
\ + C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \ + RegistryName, std::string, ObjectType, std::unique_ptr, ##__VA_ARGS__) + +#define C10_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \ + C10_DECLARE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +#define TORCH_DECLARE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \ + TORCH_DECLARE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +#define C10_DEFINE_SHARED_REGISTRY(RegistryName, ObjectType, ...) \ + C10_DEFINE_TYPED_REGISTRY( \ + RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +#define C10_DEFINE_SHARED_REGISTRY_WITHOUT_WARNING( \ + RegistryName, ObjectType, ...) \ + C10_DEFINE_TYPED_REGISTRY_WITHOUT_WARNING( \ + RegistryName, std::string, ObjectType, std::shared_ptr, ##__VA_ARGS__) + +// C10_REGISTER_CREATOR and C10_REGISTER_CLASS are hard-wired to use std::string +// as the key +// type, because that is the most commonly used cases. +#define C10_REGISTER_CREATOR(RegistryName, key, ...) \ + C10_REGISTER_TYPED_CREATOR(RegistryName, #key, __VA_ARGS__) + +#define C10_REGISTER_CREATOR_WITH_PRIORITY(RegistryName, key, priority, ...) \ + C10_REGISTER_TYPED_CREATOR_WITH_PRIORITY( \ + RegistryName, #key, priority, __VA_ARGS__) + +#define C10_REGISTER_CLASS(RegistryName, key, ...) \ + C10_REGISTER_TYPED_CLASS(RegistryName, #key, __VA_ARGS__) + +#define C10_REGISTER_CLASS_WITH_PRIORITY(RegistryName, key, priority, ...) \ + C10_REGISTER_TYPED_CLASS_WITH_PRIORITY( \ + RegistryName, #key, priority, __VA_ARGS__) + +} // namespace c10 + +#endif // C10_UTIL_REGISTRY_H_ diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h new file mode 100644 index 0000000000000000000000000000000000000000..a4c243ab1c9dc998055e1a6b42700896c3ddc0cb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/SmallBuffer.h @@ -0,0 +1,87 @@ +#pragma once +#include +#include +#include +#include + +/** Helper class for allocating temporary fixed size arrays with SBO. 
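+ *
+ * (SBO here means small-buffer optimization: requested sizes up to N live in
+ * the inline std::array storage; larger requests fall back to a single heap
+ * allocation, as the constructor below shows.)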
+ *
+ * This is intentionally much simpler than SmallVector, to improve performance
+ * at the expense of many features:
+ * - No zero-initialization for numeric types
+ * - No resizing after construction
+ * - No copy/move
+ * - No non-trivial types
+ */
+
+namespace c10 {
+
+template <typename T, size_t N>
+class SmallBuffer {
+  static_assert(std::is_trivial_v<T>, "SmallBuffer is intended for POD types");
+
+  std::array<T, N> storage_;
+  size_t size_{};
+  T* data_{};
+
+ public:
+  SmallBuffer(size_t size) : size_(size) {
+    if (size > N) {
+      data_ = new T[size];
+    } else {
+      data_ = &storage_[0];
+    }
+  }
+
+  SmallBuffer(const SmallBuffer&) = delete;
+  SmallBuffer& operator=(const SmallBuffer&) = delete;
+
+  // move constructor is needed in function return
+  SmallBuffer(SmallBuffer&& rhs) noexcept : size_{rhs.size_} {
+    rhs.size_ = 0;
+    if (size_ > N) {
+      data_ = rhs.data_;
+      rhs.data_ = nullptr;
+    } else {
+      storage_ = std::move(rhs.storage_);
+      data_ = &storage_[0];
+    }
+  }
+
+  SmallBuffer& operator=(SmallBuffer&&) = delete;
+
+  ~SmallBuffer() {
+    if (size_ > N) {
+      delete[] data_;
+    }
+  }
+  T& operator[](size_t idx) {
+    return data()[idx];
+  }
+  const T& operator[](size_t idx) const {
+    return data()[idx];
+  }
+  T* data() {
+    return data_;
+  }
+  const T* data() const {
+    return data_;
+  }
+  size_t size() const {
+    return size_;
+  }
+  T* begin() {
+    return data_;
+  }
+  const T* begin() const {
+    return data_;
+  }
+  T* end() {
+    return data_ + size_;
+  }
+  const T* end() const {
+    return data_ + size_;
+  }
+};
+
+} // namespace c10
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Synchronized.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Synchronized.h
new file mode 100644
index 0000000000000000000000000000000000000000..65035ce6aab72d0ad8e0d9567ed09637c673740a
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/Synchronized.h
@@ -0,0 +1,61 @@
+#pragma once
+
+#include <mutex>
+
+namespace c10 {
+
+/**
+ * A very simple Synchronization class for error-free use of data
+ * in a multi-threaded context. See folly/docs/Synchronized.md for
+ * the inspiration of this class.
+ *
+ * Full URL:
+ * https://github.com/facebook/folly/blob/main/folly/docs/Synchronized.md
+ *
+ * This class implements a small subset of the generic functionality
+ * implemented by folly::Synchronized. Specifically, only withLock
+ * is implemented here since it's the smallest possible API that is
+ * able to cover a large surface area of functionality offered by
+ * folly::Synchronized.
+ */
+template <typename T>
+class Synchronized final {
+  mutable std::mutex mutex_;
+  T data_;
+
+ public:
+  Synchronized() = default;
+  Synchronized(T const& data) : data_(data) {}
+  Synchronized(T&& data) : data_(std::move(data)) {}
+
+  // Don't permit copy construction, move, assignment, or
+  // move assignment, since the underlying std::mutex
+  // isn't necessarily copyable/moveable.
+  Synchronized(Synchronized const&) = delete;
+  Synchronized(Synchronized&&) = delete;
+  Synchronized operator=(Synchronized const&) = delete;
+  Synchronized operator=(Synchronized&&) = delete;
+
+  /**
+   * To use, call withLock with a callback that accepts T either
+   * by copy or by reference. Use the protected variable in the
+   * provided callback safely.
+   */
+  template <typename CB>
+  auto withLock(CB&& cb) {
+    std::lock_guard<std::mutex> guard(this->mutex_);
+    return std::forward<CB>(cb)(this->data_);
+  }
+
+  /**
+   * To use, call withLock with a callback that accepts T either
+   * by copy or by const reference.
Use the protected variable in
+   * the provided callback safely.
+   */
+  template <typename CB>
+  auto withLock(CB&& cb) const {
+    std::lock_guard<std::mutex> guard(this->mutex_);
+    return std::forward<CB>(cb)(this->data_);
+  }
+};
+} // end namespace c10
diff --git a/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/TypeList.h b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/TypeList.h
new file mode 100644
index 0000000000000000000000000000000000000000..a540a0c5c674474c80d209edbf20eb891f1e199c
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/torch/include/c10/util/TypeList.h
@@ -0,0 +1,515 @@
+#pragma once
+
+#include <c10/util/TypeTraits.h>
+#include <algorithm>
+#include <cstddef>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+namespace c10::guts {
+
+template <class... T>
+struct false_t : std::false_type {};
+template